# -*- coding: utf-8 -*-
"""
@author: charl
"""
# code to go alongside Applied Tracking: Pressure - https://www.opengoalapp.com/tracking-pressure
from MetricaUtils import Reformat
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import matplotlib
from mplsoccer.pitch import Pitch
imported_home = pd.read_csv('data/Sample_Game_1_RawTrackingData_Home_Team.csv', skiprows=2) # ignore first 2 unneeded rows on import
imported_away = pd.read_csv('data/Sample_Game_1_RawTrackingData_Away_Team.csv', skiprows=2)
imported_events = pd.read_csv('data/Sample_Game_1_RawEventsData.csv')
timestep = imported_home["Time [s]"].iloc[1] - imported_home["Time [s]"].iloc[0] # find timestep of imported data from csv
# pitch dimension in metres
PITCH_XDIM = 105
PITCH_YDIM = 68
frames_per_sample = 1 # how often will we sample the data - e.g. 5 = sample every 5 frames, 1 = keep every frame
sampled_timestep= frames_per_sample * timestep
tracking_home = imported_home.iloc[::frames_per_sample]
tracking_away = imported_away.iloc[::frames_per_sample]
tracking_home = Reformat(tracking_home) # reformat into more user friendly form
tracking_away = Reformat(tracking_away)
#change locs to actual vals in metres
tracking_home['x_loc'] = tracking_home['x_loc']*PITCH_XDIM
tracking_home['y_loc'] = tracking_home['y_loc']*PITCH_YDIM
tracking_away['x_loc'] = tracking_away['x_loc']*PITCH_XDIM
tracking_away['y_loc'] = tracking_away['y_loc']*PITCH_YDIM
imported_events[['start_x','end_x']] = imported_events[['start_x','end_x']]*PITCH_XDIM
imported_events[['start_y','end_y']] = imported_events[['start_y','end_y']]*PITCH_YDIM
#select just passes from events feed
passes = imported_events[imported_events['type']=='PASS']
#focus on home team passes
home_passes = passes[passes['team']=='Home']
#find the tracking frames associated with a pass receive event
home_receive_frames = list(home_passes['end_frame'])
#get the location of opposition players for said frames
home_receive_opplocs = tracking_away[tracking_away['frame'].isin(home_receive_frames)]
#convert this data back into wide format (pivot being the opposite of melt) as it is, for me, easier to visualise for this analysis
hro_grouped = home_receive_opplocs.pivot(index='frame', columns='player', values=['x_loc', 'y_loc'])
hro_ball = hro_grouped[[('x_loc','Ball'),('y_loc','Ball')]]
hro_grouped = hro_grouped.drop([('x_loc','Ball'),('y_loc','Ball')], axis=1)
# create and fill dictionaries with the squared distance between player location and ball for both the x and y axes
xdiffs = {}
ydiffs = {}
for column in hro_grouped:
xdiff2 = (hro_grouped[column]-hro_ball[('x_loc','Ball')])**2
xdiffs[column] = xdiff2
ydiff2 = (hro_grouped[column]-hro_ball[('y_loc','Ball')])**2
ydiffs[column] = ydiff2
# convert to df and only keep valid distances
xdiffs_df = pd.DataFrame(xdiffs)
xdiffs_df = xdiffs_df.filter(like='x_loc')
ydiffs_df = pd.DataFrame(ydiffs)
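# --- Added sketch (not in the original script): a plausible continuation, assuming the
# analysis next filters the y-axis distances, combines the squared x/y differences into
# Euclidean distances, and takes the closest opponent per receive frame as a simple
# pressure proxy. The names xpart, ypart, dist_df and nearest_opp_dist are hypothetical.
ydiffs_df = ydiffs_df.filter(like='y_loc')
# drop the x_loc/y_loc level so the two frames align on player name
xpart = xdiffs_df.copy()
xpart.columns = [col[1] for col in xpart.columns]
ypart = ydiffs_df.copy()
ypart.columns = [col[1] for col in ypart.columns]
# Euclidean distance of every away player to the ball at each home pass-receive frame
dist_df = np.sqrt(xpart + ypart)
# distance of the closest opponent per frame
nearest_opp_dist = dist_df.min(axis=1)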
import logging
try:
from openbabel import pybel # ob 3.0.0
except ImportError: # ob 2.4
import pybel
import re
import numpy as np
import pandas as pd
import pymongo
from bson.objectid import ObjectId
from rdkit import Chem
from rdkit.Chem import rdFMCS
from aqc_utils.helper_classes import config
from aqc_utils.helper_functions import add_numbers_to_repeated_items
logger = logging.getLogger(__name__)
desc_presets = ['global', 'min_max', 'substructure', 'core', 'labeled', 'transitions']
desc_presets_long = ['Global', 'Min Max Atomic', 'Substructure Atomic', 'Common Core Atomic', 'Labeled Atomic',
"Excited State Transitions"]
conf_options = ['boltzmann', 'max', 'min', 'mean', 'std', 'any']
conf_options_long = ['Boltzmann Average', 'Lowest Energy Conformer', 'Highest Energy Conformer', 'Arithmetic Average',
'Standard Deviation', 'Random']
class InconsistentLabelsException(Exception):
"""Raised when a set of molecules is inconsistently labeled"""
pass
def db_connect(collection=None) -> pymongo.collection.Collection:
"""Create a connection to the database and return the table (Collection).
:return: pymongo.collection.Collection
"""
cli = pymongo.MongoClient("mongodb+srv://julosdu13:[email protected]/Cluster0?retryWrites=true&w=majority")
#db = client.test
# cli = pymongo.MongoClient(config['mongoDB']['host'],
# username=config['mongoDB']['user'],
# password=config['mongoDB']['password'],
# port=config['mongoDB']['port'])
if collection is None:
return cli['dft_for_nicoupling']
else:
return cli['dft_for_nicoupling'][collection]
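# Hedged usage sketch (added, not part of the original module): look up one molecule by
# its canonical SMILES and count its stored conformations. The default SMILES "CCO" is
# purely illustrative.
def _example_connection_usage(can="CCO"):
    mols_coll = db_connect("molecules")
    feats_coll = db_connect("qchem_descriptors")
    mol = mols_coll.find_one({"can": can})
    if mol is None:
        return 0
    return feats_coll.count_documents({"molecule_id": mol["_id"]})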
def db_upload_molecule(can, tags, metadata, weights, conformations, logs) -> ObjectId:
"""Upload single molecule to DB and all child objects tags, features
and log files for its conformations"""
db = db_connect()
mols_coll = db['molecules']
tags_coll = db['tags']
# create molecule record and insert it
mol_data = {'can': can, 'metadata': metadata}
# try/except added by julosdu13
try:
ret = mols_coll.insert_one(mol_data)
mol_id = ret.inserted_id
# insert tag record
for tag in tags:
tags_coll.insert_one({'tag': tag, 'molecule_id': mol_id, 'can': can})
for weight, conformation, log in zip(weights, conformations, logs):
db_upload_conformation(mol_id, can, weight, conformation, log, check_mol_exists=False)
except:
mol_id = None
return mol_id
def db_upload_conformation(mol_id, can, weight, conformation, log, check_mol_exists=True):
"""Upload single conformation features and log file to DB, requires a molecule
to be present"""
db = db_connect()
# check if the molecule with a given id exists in the DB
mols_coll = db["molecules"]
if check_mol_exists:
assert mols_coll.find_one({'_id': mol_id}) is not None
# connect to features and logs collections
feats_coll = db['qchem_descriptors']
logs_coll = db['log_files']
data = {'molecule_id': mol_id, 'weight': weight, 'can': can}
# update with descriptors
data.update(conformation)
# db insertion
feats_coll.insert_one(data)
logs_coll.insert_one({'molecule_id': mol_id, 'log': log, 'can': can})
def db_delete_molecule(mol_id):
"""Delete molecule from DB, cascade all child objects: tags, features and log files"""
db = db_connect()
if isinstance(mol_id, str):
mol_id = ObjectId(mol_id)
print(mol_id)
db['qchem_descriptors'].delete_many({"molecule_id": mol_id}) # features
db['log_files'].delete_many({"molecule_id": mol_id}) # log files
db['tags'].delete_many({"molecule_id": mol_id}) # tags
db['molecules'].delete_one({"_id": mol_id}) # molecule itself
def db_select_molecules(cls=None, subcls=None, type=None, subtype=None, tags=[], substructure="") -> pd.DataFrame:
"""Get a summary frame of molecules in the database
:param tags: a list of tags of the db records (if multiple an 'OR' is taken)
:type tags: list
:param substructure: substructure SMARTS string
:type substructure: str
:return: pandas.core.frame.DataFrame
"""
db = db_connect()
tags_coll = db['tags']
mols_coll = db['molecules']
feats_coll = db['qchem_descriptors']
tags_cur = tags_coll.find({'tag': {'$in': tags}} if tags else {})
tags_df = pd.DataFrame(tags_cur)
filter = {}
if cls != "" and cls is not None:
filter['metadata.class'] = cls
if subcls != "" and subcls is not None:
filter['metadata.subclass'] = subcls
if type != "" and type is not None:
filter['metadata.type'] = type
if subtype != "" and subtype is not None:
filter['metadata.subtype'] = subtype
filter['_id'] = {'$in': tags_df.molecule_id.tolist()}
mols_cur = mols_coll.find(filter)
mols_df = pd.DataFrame(mols_cur)
if 'name' not in mols_df.columns:
mols_df['name'] = None
if substructure:
pattern = pybel.Smarts(substructure)
mols_df['pybel_mol'] = mols_df['can'].map(lambda can: pybel.readstring("smi", can))
mols_df = mols_df[mols_df['pybel_mol'].map(lambda mol: bool(pattern.findall(mol)))]
mols_df = mols_df.drop('pybel_mol', axis=1)
# merge tags in an outer way
df = pd.merge(mols_df, tags_df, how='outer', left_on='_id', right_on='molecule_id', suffixes=('', '_tag'))
# make tags into a list of tags
df['metadata_str'] = df['metadata'].map(repr)
grouped = df.groupby(['can', 'metadata_str'])
# groupby tags
df = pd.concat([grouped['metadata', 'molecule_id', 'name'].first(),
grouped['tag'].apply(list)], axis=1).reset_index().drop('metadata_str', axis=1)
# fetch ids and weights
feats_cur = feats_coll.find({'molecule_id': {'$in': df.molecule_id.tolist()}},
{'_id': 1, 'weight': 1, 'molecule_id': 1})
feats_df = pd.DataFrame(feats_cur)
feats_df = feats_df.groupby('molecule_id').agg(list).reset_index()
feats_df = feats_df.rename(columns={'_id': '_ids', 'weight': 'weights'})
# merge into df
df = df.merge(feats_df, on='molecule_id')
df['num_conformers'] = df['_ids'].map(len)
return df
def db_check_exists(can, gaussian_config, max_num_conformers) -> tuple:
"""Check if a molecule is already present in the database with the same Gaussian config (theory, basis_sets, etc.)
:param can: canonical smiles
:type can: str
:param gaussian_config: gaussian config dictionary
:type gaussian_config: dict
:return: exists(bool), list of tags that are associated with the molecule if it exists
"""
db = db_connect()
mols_coll = db['molecules']
tags_coll = db['tags']
mol_id = mols_coll.find_one({"can": can,
"metadata.gaussian_config": gaussian_config,
"metadata.max_num_conformers": max_num_conformers},
{"molecule_id": 1})
exists, tags = False, []
if mol_id is not None:
exists = True
tags = tags_coll.distinct('tag', {'molecule_id': ObjectId(mol_id['_id'])})
return exists, tags
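# Hedged usage sketch (added): a typical guard built on db_check_exists to avoid
# duplicate molecule records before uploading. Assumes metadata carries the same
# 'gaussian_config' and 'max_num_conformers' keys that db_check_exists queries on.
def _upload_if_new(can, tags, metadata, weights, conformations, logs):
    exists, existing_tags = db_check_exists(can,
                                            gaussian_config=metadata.get('gaussian_config'),
                                            max_num_conformers=metadata.get('max_num_conformers'))
    if exists:
        logger.info(f"{can} already in DB with tags {existing_tags}; skipping upload.")
        return None
    return db_upload_molecule(can, tags, metadata, weights, conformations, logs)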
def descriptors(cls, subcls, type, subtype, tags, presets, conf_option, substructure="") -> dict:
"""Retrieve DFT descriptors from the database
:param tag: metadata.tag of the db records
:type tag: str
:param presets: list of descriptor presets from 'global' (molecule level descriptors), \
'min_max' (min and max for each atomic descriptor across the molecule), 'substructure' \
(atomic descriptors for each atom in the substructure)
:type presets: list
:param conf_option: conformer averaging option: 'boltzmann' (Boltzmann average), \
'max' (conformer with highest weight), 'mean' (arithmetic average), 'min' (conformer with smallest weight), \
'any' (any single conformer), 'std' (std dev. over conformers)
:type conf_option: str
:param substructure: substructure SMARTS string
:type substructure: str
:return:
"""
# don't bother with extraction if there are no presets or no conf_option
if not presets or not conf_option:
logger.warning(f"One of options 'presets' or 'conf_option' is empty. Not extracting.")
return {}
# check that presets are ok
if not all(p in desc_presets for p in presets):
logger.warning(f"One of the presets in {presets} is not from allowed list {desc_presets}. Not extracting.")
return {}
# check that conf option is ok
if conf_option not in conf_options:
logger.warning(f"Conf_option {conf_option} is not one of the allowed options {conf_options}. Not extracting.")
return {}
mol_df = db_select_molecules(cls=cls, subcls=subcls, type=type, subtype=subtype,
tags=tags, substructure=substructure)
# TODO making DB queries inside a loop is very inefficient, this code should be reorganized to use single query
descs_df = mol_df.set_index('can')['_ids'].map(lambda l: descriptors_from_list_of_ids(l, conf_option=conf_option))
data = {}
if 'global' in presets:
dg = pd.concat([d['descriptors'] for can, d in descs_df.iteritems()], axis=1, sort=True)
dg.columns = descs_df.index
data['global'] = dg.T
if 'min_max' in presets:
dmin = pd.concat([d['atom_descriptors'].min() for can, d in descs_df.iteritems()], axis=1, sort=True)
dmax = pd.concat([d['atom_descriptors'].max() for can, d in descs_df.iteritems()], axis=1, sort=True)
dmin.columns = descs_df.index
dmax.columns = descs_df.index
data['min'] = dmin.T
data['max'] = dmax.T
if 'transitions' in presets:
# select the strongest transitions by oscillator strength (the 10 strongest are kept below)
ts = pd.concat([d['transitions'].sort_values("ES_osc_strength",
ascending=False).head(10).reset_index(drop=True).unstack()
for can, d in descs_df.iteritems()], axis=1, sort=True)
ts.index = ts.index.map(lambda i: "_".join(map(str, i)))
ts.columns = descs_df.index
data['transitions'] = ts.T
if 'substructure' in presets and substructure:
sub = pybel.Smarts(substructure)
# these matches are numbered from 1, so subtract one from them
matches = descs_df.index.map(lambda c: sub.findall(pybel.readstring("smi", c))[0])
matches = matches.map(lambda x: (np.array(x) - 1).tolist())
# fetch atom labels for this smarts using the first molecule
sub_labels = pd.Series(descs_df.iloc[0]['labels']).loc[matches[0]].tolist()
sub_labels = add_numbers_to_repeated_items(sub_labels)
sub_labels = [f"atom{i + 1}" for i in range(len(matches[0]))]
# create a frame with descriptors large structure in one column, and substructure match
# indices in the second column
tmp_df = descs_df.to_frame('descs')
tmp_df['matches'] = matches
for i, label in enumerate(sub_labels):
# data[label] = pd.concat([row['descs']['atom_descriptors'].loc[row['matches'][i]]
# for c, row in tmp_df.iterrows()], axis=1)
to_concat = []
for c, row in tmp_df.iterrows():
atom_descs = row['descs']['atom_descriptors']
atom_descs['labels'] = row['descs']['labels']
to_concat.append(atom_descs.iloc[row['matches'][i]])
data[label] = pd.concat(to_concat, axis=1, sort=True)
data[label].columns = descs_df.index
data[label] = data[label].T
if 'core' in presets:
cans = mol_df['can'].tolist()
rd_mols = {can: Chem.MolFromSmiles(can) for can in cans}
# occasionally rdkit cannot create a molecule from can that openbabel can
# this is typically due to dative bonds
for can, rd_mol in rd_mols.items():
if rd_mol is None:
logger.warning(f"Molecule with can: {can} cannot be constructed directly by rdkit.")
rd_mols[can] = Chem.MolFromSmarts(can) # create it from smarts
# run MCS if there is more than 1 molecule
if len(rd_mols) > 1:
core_smarts = rdFMCS.FindMCS(list(rd_mols.values())).smartsString
else: # otherwise use the entire smiles as smarts string
core_smarts = Chem.MolToSmarts(list(rd_mols.values())[0])
# create an rdkit smarts
core = Chem.MolFromSmarts(core_smarts)
# get the first match, if multiple substructure matches exist
matches = {can: rd_mols[can].GetSubstructMatches(core)[0] for can in cans}
matches = pd.Series(matches).map(list)
# create a frame with descriptors large structure in one column, and substructure match
# indices in the second column
tmp_df = descs_df.to_frame('descs')
tmp_df['matches'] = matches
# fetch atom labels for this smarts using the first molecule
row = tmp_df.iloc[0]
row_labels = pd.Series(row['descs']['labels'])
row_labels = row_labels[~row_labels.str.startswith('H')] # need to remove hydrogens
sub_labels = row_labels.iloc[row['matches']].tolist()
sub_labels = add_numbers_to_repeated_items(sub_labels)
for i, label in enumerate(sub_labels):
to_concat = []
for c, row in tmp_df.iterrows():
atom_descs = row['descs']['atom_descriptors']
atom_descs['labels'] = row['descs']['labels']
atom_descs = atom_descs[~atom_descs['labels'].str.startswith("H")] # need to remove hydrogens
to_concat.append(atom_descs.iloc[row['matches'][i]])
data[label] = pd.concat(to_concat, axis=1, sort=True)
data[label].columns = descs_df.index
data[label] = data[label].T
if 'labeled' in presets:
# extract the positions of the labeled atoms in the atom lists for each molecule
labels = descs_df.map(lambda d: [re.sub(r"\D", "", l) for l in d['labels']])
labels = labels.map(lambda ls: [(index, l) for index, l in enumerate(ls) if l])
labels = labels.map(lambda ls: sorted(ls, key=lambda l: l[1]))
# verify that the atomic labels are consistent across all molecules
atom_numbers = labels.map(lambda ls: [l[1] for l in ls])
atom_numbers_dedup = atom_numbers.map(tuple).drop_duplicates()
if len(atom_numbers_dedup) == 1:
matches = labels.map(lambda ls: [l[0] for l in ls])
# create a frame with descriptors large structure in one column, and substructure match
# indices in the second column
tmp_df = descs_df.to_frame('descs')
tmp_df['matches'] = matches
for i, label in enumerate(atom_numbers_dedup.iloc[0]):
label = 'A' + label
data[label] = pd.concat([row['descs']['atom_descriptors'].loc[row['matches'][i]]
for c, row in tmp_df.iterrows()], axis=1, sort=True)
data[label].columns = descs_df.index
data[label] = data[label].T
else:
logger.warning("Atomic labels are inconsistent. Not all molecules have the same set of labeled atoms")
raise InconsistentLabelsException
return data
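# Hedged usage sketch (added): pulling Boltzmann-averaged global and min/max atomic
# descriptors for molecules carrying an example tag. The tag name 'example_tag' is a
# placeholder, not a tag known to exist in the database.
def _example_descriptor_pull():
    data = descriptors(cls=None, subcls=None, type=None, subtype=None,
                       tags=['example_tag'], presets=['global', 'min_max'],
                       conf_option='boltzmann')
    # the returned frames are indexed by canonical SMILES
    return data.get('global'), data.get('min'), data.get('max')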
def descriptors_from_list_of_ids(ids, conf_option='max') -> dict:
"""Get and average descriptors using a list of db ids.
:param ids: list of db id's that correspond to a given molecule
:type ids: list
:param conf_option: conformer averaging option: 'boltzmann' (Boltzmann average), \
'max' (conformer with highest weight), 'mean' (arithmetic average), 'min' (conformer with smallest weight), \
'any' (any single conformer), 'std' (std dev. over conformers)
:type conf_option: str
:return: dict
"""
# check that conf option is ok
if conf_option not in conf_options:
logger.warning(f"Conf_option {conf_option} is not one of the allowed options {conf_options}. Not extracting.")
return {}
# connect to db
feats_coll = db_connect("qchem_descriptors")
# fetch db _ids and weights and can
cursor = feats_coll.find({"_id": {"$in": ids}}, {'weight': 1, 'molecule_id': 1})
recs = pd.DataFrame(cursor).sort_values('weight', ascending=False)
# assert that all ids belong to the same molecule, and that the conformer weights sum to 1.
assert len(recs.molecule_id.unique()) == 1
assert abs(recs.weight.sum() - 1.) < 1e-6
# set trivial option for the case with only one conformation
if len(recs) == 1:
conf_option = 'any'
# single conf options
if conf_option in ['min', 'max', 'any']:
if conf_option == 'max':
_id = recs['_id'].iloc[0]
elif conf_option == 'min':
_id = recs['_id'].iloc[-1]
else:
_id = recs['_id'].sample(1).iloc[0]
# return pandatized record for a chosen id
return _pandatize_record(feats_coll.find_one({"_id": _id}))
rec = {}
if conf_option in ['boltzmann', 'mean', 'std']:
# fetch db records for these _ids
cursor = feats_coll.find({"_id": {"$in": ids}})
recs = [_pandatize_record(record) for record in cursor]
rec.update({"labels": recs[0]['labels']})
keys_to_reweight = ['descriptors', 'atom_descriptors', 'modes', 'transitions']
for key in keys_to_reweight:
if conf_option == 'boltzmann':
dfs = pd.concat(r[key] * r['weight'] for r in recs)
rec[key] = dfs.groupby(dfs.index, sort=False).sum()
if conf_option in ['mean', 'std']:
dfs = pd.concat(r[key] for r in recs)
if conf_option == 'mean':
rec[key] = dfs.groupby(dfs.index, sort=False).mean()
elif conf_option == 'std':
rec[key] = dfs.groupby(dfs.index, sort=False).std()
return rec
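# Hedged illustration (added): the 'boltzmann' branch above is a weighted average -
# each conformer's frame is scaled by its weight and the scaled frames are summed
# index-wise. The toy descriptor values below are made up.
def _boltzmann_average_example():
    conf_a = pd.Series({'homo': -0.30, 'lumo': 0.05})
    conf_b = pd.Series({'homo': -0.28, 'lumo': 0.07})
    weights = [0.8, 0.2]
    dfs = pd.concat([conf_a * weights[0], conf_b * weights[1]])
    return dfs.groupby(dfs.index, sort=False).sum()  # weighted average per descriptor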
def _pandatize_record(record) -> dict:
"""Convert json structures to pandas structures for an individual
db record of a single conformation.
:param record: db record of a single conformation
:return: dict
"""
del record['descriptors']['stoichiometry']
record['descriptors'] = pd.Series(record['descriptors']).astype(float)
record['modes'] = pd.DataFrame(record['modes']).astype(float)
record['transitions'] = pd.DataFrame(record['transitions']).astype(float)
record['atom_descriptors'] = pd.DataFrame(record['atom_descriptors']).astype(float)
if record['mode_vectors'] is not None:
record['mode_vectors'] = pd.DataFrame(record['mode_vectors'])
record['mode_vectors']['atom_idx'] = list(range(len(record['labels']))) * 3 * record['modes'].shape[0]
record['mode_vectors'] = record['mode_vectors'].set_index(['mode_number', 'axis', 'atom_idx']).unstack(
['mode_number', 'axis'])
record['mode_vectors'] = record['mode_vectors'].droplevel(0, axis=1).astype(float)
else:
record['mode_vectors'] = pd.DataFrame()
# Version 1.0. <NAME>
# Copyright (c) 2019 Potsdam University
# read result tables from the PV_fisher.c code (by <NAME>: https://github.com/CullanHowlett/PV_fisher)
import csv
import glob
import pandas as pd
res_dir = "./results_new_2904"
def get_pars_from_table(table):
"""
read the full redshift range values of
zeff, fsigma8(z_eff), percentage error(z_eff)
from the files produced by the Fisher code.
"""
print ("reading results from file: %s"%table)
out = {}
with open(table, 'r') as tab:
lines = tab.readlines()
out['n_sample'] = float(lines[0].split(":")[-1].strip())
if lines[1].split(":")[-1].strip() == "False":
out['w_rsd'] = False
else:
out['w_rsd'] = True
print(lines[1].split(":")[-1].strip())
print(bool(lines[1].split(":")[-1].strip()))
out['pv_d_error'] = float(lines[2].split(":")[-1].strip())
out['sky_area'] = float(lines[3].split(":")[-1].strip())
#out['zmax'] = float(lines[4].split(":")[-1].strip())
out['k_max'] = float(lines[4].split(":")[-1].strip())
out['free_paramters'] = float(lines[5].split(":")[-1].strip())
last_line = lines[-1]
pieces = [p.strip() for p in last_line.strip().split(" ") if p not in ['']]
out['zeff'] = pieces[-3]
out['fsigma8'] = pieces[-2]
out['%error'] = pieces[-1]
#print (repr(out))
return out
# read all the tables, save average values of parameters
tables = glob.glob(res_dir+"/fisher*.csv")
print ("found %d table files in %s"%(len(tables), res_dir))
results = []
for table in tables:
out = get_pars_from_table(table)
results.append(out)
#save the extracted results into one big table
#outfile = "global_taibresult.csv"
outfile = "global_Magnitudes_result.csv"
df = pd.DataFrame(results)
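# Hedged completion (the original script is truncated here): presumably the collected
# results are written to the CSV named above, e.g.:
df.to_csv(outfile, index=False)
print("wrote %d rows to %s" % (len(df), outfile))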
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from webtzite import mapi_func
import pandas as pd
from itertools import groupby
from scipy.optimize import brentq
from webtzite.connector import ConnectorBase
from mpcontribs.rest.views import Connector
from mpcontribs.users.redox_thermo_csp.rest.energy_analysis import EnergyAnalysis as enera
from mpcontribs.users.redox_thermo_csp.rest.utils import remove_comp_one, add_comp_one, rootfind, get_energy_data
from mpcontribs.users.redox_thermo_csp.rest.utils import s_th_o, dh_ds, funciso, funciso_redox, isobar_line_elling
from mpcontribs.users.redox_thermo_csp.rest.utils import funciso_theo, funciso_redox_theo, d_h_num_dev_calc, d_s_fundamental
ConnectorBase.register(Connector)
def init_isographs(request, db_type, cid, mdb):
try:
contrib = mdb.contrib_ad.query_contributions(
{'_id': cid}, projection={'_id': 0, 'content.pars': 1, 'content.data': 1})[0]
pars = contrib['content']['pars']
pars['compstr_disp'] = remove_comp_one(pars['theo_compstr']) # for user display
if pars['compstr_disp'] == pars['theo_compstr']:
pars['theo_compstr'] = add_comp_one(pars['theo_compstr']) # compstr must contain '1' such as in "Sr1Fe1Ox"
pars['compstr_disp'] = [''.join(g) for _, g in groupby(str(pars['compstr_disp']), str.isalpha)]
pars['experimental_data_available'] = pars.get('fit_type_entr')
if pars['experimental_data_available']:
pars['compstr_exp'] = contrib['content']['data']['oxidized_phase']['composition']
pars['compstr_exp'] = [''.join(g) for _, g in groupby(str(pars['compstr_exp']), str.isalpha)]
else:
pars['compstr_exp'] = "n.a."
pars['td_perov'] = pars["elastic"]["debye_temp"]["perovskite"]
pars['td_brownm'] = pars["elastic"]["debye_temp"]["brownmillerite"]
pars['tens_avail'] = pars["elastic"]["tensors_available"]
for k, v in pars.items():
if k == 'experimental_data_available':
continue
elif isinstance(v, dict):
pars[k] = {}
for kk, x in v.items():
try:
pars[k][kk] = float(x)
except:
continue
elif not v[0].isalpha():
try:
pars[k] = float(v)
except:
continue
a, b = 1e-10, 0.5-1e-10 # limiting values for non-stoichiometry delta in brentq
response, payload = {}, {}
plottype = request.path.split("/")[-1]
if request.method == 'GET':
if plottype == "isotherm":
payload['iso'] = 800.
payload['rng'] = [-5, 1]
elif plottype == "isobar":
payload['iso'] = -5
payload['rng'] = [600, 1000]
elif plottype == "isoredox":
payload['iso'] = 0.3
payload['rng'] = [700, 1000]
elif plottype == "ellingham":
payload['iso'] = 0.
payload['rng'] = [700, 1000]
else: # dH or dS
payload['iso'] = 500.
elif request.method == 'POST':
payload = json.loads(request.body)
payload['iso'] = float(payload['iso'])
if payload.get('rng'):
payload['rng'] = list(map(float, payload['rng'].split(",")))
if plottype == "isotherm": # pressure on the x-axis
x_val = pd.np.log(pd.np.logspace(payload['rng'][0], payload['rng'][1], num=100))
elif not payload.get('rng'): # dH or dS # delta on the x-axis
x_val = pd.np.linspace(0.01, 0.49, num=100)
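# Hedged sketch (added; the original function is truncated above): the limits a, b
# defined earlier are used with scipy's brentq to solve for the non-stoichiometry delta
# at a given temperature and oxygen partial pressure. The residual below is a
# hypothetical stand-in for the imported funciso/rootfind helpers, shown only to
# illustrate the call pattern, not the actual thermodynamic model.
def _solve_delta_example(temperature, log_po2, dh=250e3, ds=150.0):
    R = 8.314  # J/(mol K)
    def residual(delta):
        dg = dh - temperature * ds
        return dg + 0.5 * R * temperature * pd.np.log(10.0) * log_po2 \
                  - R * temperature * pd.np.log((0.5 - delta) / delta)
    return brentq(residual, 1e-10, 0.5 - 1e-10)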
import os
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
# features
DATE = "date"
YEAR = "year"
MONTH = "month"
DAY = "day"
ID = 'id'
PRICE = 'price'
ZIP = 'zipcode'
RESPONSE_COLUMN = 'price'
FEATURES_TO_DROP = ['id', 'date', 'yr_renovated', 'zipcode'] # raw columns dropped after deriving features from them (the house ID carries no predictive information)
NUM_AREAS = 20
HOUSES_FILE = r"IML.HUJI\datasets\house_prices.csv"
EVALUATION_OUTPUT_PATH = r"C:\Users\rimon\Desktop\2021-2022 semester B\IML\ex2\feature_evaluation_graph"
TRAIN_RATIO = 0.75
#Q4
Q4_OUTPUT_PATH = r"C:\Users\rimon\Desktop\2021-2022 semester B\IML\ex2\a "
Q4_GRAPH_TITLE = "average loss as a function of training size"
Q4_X_TITLE = "training-data size"
Q4_Y_TITLE = "average loss"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
initial_dataset = pd.read_csv(filename)
samples_mat, response = preprocess_dataset(initial_dataset, RESPONSE_COLUMN)
return (samples_mat, response)
def preprocess_dataset(initial_dataset: pd.DataFrame,
response_label) -> (pd.DataFrame, pd.Series):
"""
preprocessing the data set to include only
the relevance features and samples, and won't include the response
:param response_label: the name of the column of the response.
this column will be removed from the features of the dataset
:param initial_dataset: the initial dataset
:return: samples matrix after preprocess
"""
# remove samples that have missing features
samples_dataset = initial_dataset.dropna(how="any")
# remove duplicated samples
samples_dataset = samples_dataset.drop_duplicates()
# remove samples with invalid features range
samples_dataset = samples_dataset[samples_dataset.price > 0]
samples_dataset = samples_dataset[samples_dataset.bedrooms < 15]
# split the date feature
date = samples_dataset["date"]
samples_dataset["year_sold"], samples_dataset["month_sold"], samples_dataset["day_sold"] = \
date.str[0:4].astype(int), date.str[4:6].astype(int), date.str[6:8].astype(int)
# normalize the above and basement features
samples_dataset['sqft_above'] = samples_dataset['sqft_above'] / \
samples_dataset['sqft_living']
samples_dataset['sqft_basement'] = samples_dataset['sqft_basement'] / \
samples_dataset['sqft_living']
# add years since last renovated- where the yr_built counts as reanovation year.
last_renovation = samples_dataset[['yr_built', 'yr_renovated']].max(axis=1)
years_since_renovated = samples_dataset['year_sold'] - last_renovation
samples_dataset['yrs_since_renovated'] = years_since_renovated
# quantization of the zipcodes to area-groups
samples_dataset['zipcode_area'] = pd.qcut(samples_dataset['zipcode'], NUM_AREAS, labels=False)
samples_dataset = pd.get_dummies(samples_dataset, columns=['zipcode_area'])
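# Hedged sketch (added): EVALUATION_OUTPUT_PATH above suggests a feature-evaluation
# step; a common version computes the Pearson correlation between every feature and the
# response and saves a scatter plot per feature. The function name and plotting details
# are assumptions (fig.write_image additionally requires the kaleido package).
def feature_evaluation_sketch(X: pd.DataFrame, y: pd.Series, output_path: str) -> None:
    for feature in X.columns:
        cov = np.cov(X[feature], y)[0, 1]
        pearson = cov / (np.std(X[feature]) * np.std(y))
        fig = px.scatter(x=X[feature], y=y,
                         title=f"{feature} vs. price (Pearson correlation = {pearson:.3f})",
                         labels={"x": feature, "y": "price"})
        fig.write_image(os.path.join(output_path, f"{feature}.png"))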
#adding required libraries
import pandas as pd
import pymysql
import openpyxl
from openpyxl.styles import Font
from docx import Document
from docx2pdf import convert
import PyPDF2 as pdf
import os
import json
#connecting to the database
db = pymysql.connect(host='localhost', user='root', password=None, database='contactbook')
cursor = db.cursor()
#dummy data
df = {'First_name': [''],
'Last_name': [''],
'Mobile_no': []
}
#insert function
def insert_data():
first_name = str(input("Enter your first name : "))
last_name = str(input("Enter your last name : "))
mobile = int(input("Enter your mobile number : "))
sql = "INSERT INTO CONTACTS(FIRST_NAME," \
"LAST_NAME, CONTACT) " \
"VALUES ('%s', '%s', '%s' )" % \
(first_name, last_name, mobile)
try:
cursor.execute(sql)
db.commit()
except Exception:
db.rollback()
#display function
def Dis():
sql = """SELECT * FROM contacts"""
cursor.execute(sql)
rows = cursor.fetchall()
print('first name lastname Contact no')
for row in rows:
print(row)
#delete function
def delete_data():
var = input("Enter the first name of the person's record to be deleted : ")
sql = "DELETE FROM CONTACTS WHERE first_name = '%s'" % var
try:
cursor.execute(sql)
db.commit()
cursor.close()
except Exception:
db.rollback()
print("Deleted Successfully")
#function to convert to csv
def convert_csv():
#df = open(r"Saves\phonebook.csv",'wb')
#df.close()
#df = pd.read_csv(r'Saves\phonebook.csv')
df = pd.DataFrame()
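# Hedged completion sketch (the convert_csv body above is truncated): a standalone
# version that pulls all contacts from MySQL into a DataFrame and writes them to CSV.
# The output path mirrors the commented-out lines above and is an assumption.
def convert_csv_sketch():
    cursor.execute("SELECT FIRST_NAME, LAST_NAME, CONTACT FROM CONTACTS")
    rows = cursor.fetchall()
    df = pd.DataFrame(list(rows), columns=['First_name', 'Last_name', 'Mobile_no'])
    df.to_csv(r"Saves\phonebook.csv", index=False)
    print("Contacts exported to CSV successfully")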
import unittest
import dolphindb as ddb
import numpy as np
import pandas as pd
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
class TestBasicDataTypes(unittest.TestCase):
@classmethod
def setUp(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
@classmethod
def tearDownClass(cls):
pass
def test_int_scalar(self):
re = self.s.run("100")
self.assertEqual(re, 100)
re = self.s.run("int()")
self.assertIsNone(re)
def test_bool_scalar(self):
re = self.s.run("true")
self.assertEqual(re, True)
re = self.s.run("bool()")
self.assertIsNone(re)
def test_char_scalar(self):
re = self.s.run("'a'")
self.assertEqual(re, 97)
re = self.s.run("char()")
self.assertIsNone(re)
def test_short_scalar(self):
re = self.s.run("112h")
self.assertEqual(re, 112)
re = self.s.run("short()")
self.assertIsNone(re)
def test_long_scalar(self):
re = self.s.run("22l")
self.assertEqual(re, 22)
re = self.s.run("long()")
self.assertIsNone(re)
def test_date_scalar(self):
re = self.s.run("2012.06.12")
self.assertEqual(re, np.datetime64('2012-06-12'))
re = self.s.run("date()")
self.assertIsNone(re)
def test_month_scalar(self):
re = self.s.run("2012.06M")
self.assertEqual(re, np.datetime64('2012-06'))
re = self.s.run("month()")
self.assertIsNone(re)
def test_time_scalar(self):
re = self.s.run("12:30:00.008")
self.assertEqual(re, np.datetime64('1970-01-01T12:30:00.008'))
re = self.s.run("time()")
self.assertIsNone(re)
def test_minute_scalar(self):
re = self.s.run("12:30m")
self.assertEqual(re, np.datetime64('1970-01-01T12:30'))
re = self.s.run("minute()")
self.assertIsNone(re)
def test_second_scalar(self):
re = self.s.run("12:30:10")
self.assertEqual(re, np.datetime64('1970-01-01T12:30:10'))
re = self.s.run("second()")
self.assertIsNone(re)
def test_datetime_scalar(self):
re = self.s.run('2012.06.13 13:30:10')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10'))
re = self.s.run("datetime()")
self.assertIsNone(re)
def test_timestamp_scalar(self):
re = self.s.run('2012.06.13 13:30:10.008')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10.008'))
re = self.s.run("timestamp()")
self.assertIsNone(re)
def test_nanotime_scalar(self):
re = self.s.run('13:30:10.008007006')
self.assertEqual(re, np.datetime64('1970-01-01T13:30:10.008007006'))
re = self.s.run("nanotime()")
self.assertIsNone(re)
def test_nanotimestamp_scalar(self):
re = self.s.run('2012.06.13 13:30:10.008007006')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10.008007006'))
re = self.s.run("nanotimestamp()")
self.assertIsNone(re)
def test_float_scalar(self):
re = self.s.run('2.1f')
self.assertEqual(round(re), 2)
re = self.s.run("float()")
self.assertIsNone(re)
def test_double_scalar(self):
re = self.s.run('2.1')
self.assertEqual(re, 2.1)
re = self.s.run("double()")
self.assertIsNone(re)
def test_string_scalar(self):
re = self.s.run('"abc"')
self.assertEqual(re, 'abc')
re = self.s.run("string()")
self.assertIsNone(re)
def test_uuid_scalar(self):
re = self.s.run("uuid('5d212a78-cc48-e3b1-4235-b4d91473ee87')")
self.assertEqual(re, '5d212a78-cc48-e3b1-4235-b4d91473ee87')
re = self.s.run("uuid()")
self.assertIsNone(re)
def test_ipaddr_sclar(self):
re = self.s.run("ipaddr('192.168.1.135')")
self.assertEqual(re, '192.168.1.135')
re = self.s.run("ipaddr()")
self.assertIsNone(re)
def test_int128_scalar(self):
re = self.s.run("int128('e1671797c52e15f763380b45e841ec32')")
self.assertEqual(re, 'e1671797c52e15f763380b45e841ec32')
re = self.s.run("int128()")
self.assertIsNone(re)
def test_python_datetime64_dolphindb_date_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('date', ts), np.datetime64('2019-01-01'))
def test_python_datetime64_dolphindb_month_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('month', ts), np.datetime64('2019-01'))
def test_python_datetime64_dolphindb_time_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('time', ts), np.datetime64('1970-01-01T20:01:01.122'))
def test_python_datetime64_dolphindb_minute_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('minute', ts), np.datetime64('1970-01-01T20:01'))
def test_python_datetime64_dolphindb_second_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('second', ts), np.datetime64('1970-01-01T20:01:01'))
def test_python_datetime64_dolphindb_datetime_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('datetime', ts), np.datetime64('2019-01-01T20:01:01'))
def test_python_datetime64_dolphindb_timestamp_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('timestamp', ts), np.datetime64('2019-01-01T20:01:01.122'))
def test_python_datetime64_dolphindb_nanotime_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('nanotime', ts), np.datetime64('1970-01-01T20:01:01.122346100'))
def test_python_datetime64_dolphindb_nanotimestamp_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('nanotimestamp', ts), np.datetime64('2019-01-01T20:01:01.122346100'))
def test_string_vector(self):
re = self.s.run("`IBM`GOOG`YHOO")
self.assertEqual((re == ['IBM', 'GOOG', 'YHOO']).all(), True)
re = self.s.run("['IBM', string(), 'GOOG']")
self.assertEqual((re==['IBM', '', 'GOOG']).all(), True)
re = self.s.run("[string(), string(), string()]")
self.assertEqual((re==['','','']).all(), True)
def test_function_def(self):
re = self.s.run("def f(a,b){return a+b}")
re = self.s.run("f(1, 2)")
self.assertEqual(re, 3)
def test_symbol_vector(self):
re = self.s.run("symbol(`IBM`MSFT`GOOG`BIDU)")
self.assertEqual((re == ['IBM', 'MSFT', 'GOOG', 'BIDU']).all(), True)
re = self.s.run("symbol(['IBM', '', 'GOOG'])")
self.assertEqual((re==['IBM', '', 'GOOG']).all(), True)
re = self.s.run("symbol(['', '', ''])")
self.assertEqual((re==['', '', '']).all(), True)
def test_char_vector(self):
re = self.s.run("['a', 'b', 'c']")
expected = [97, 98, 99]
self.assertEqual((re==expected).all(), True)
re = self.s.run("['a', char(), 'c']")
expected = [97.0, np.nan, 99.0]
assert_array_almost_equal(re, expected)
def test_bool_vector(self):
re = self.s.run("[true, false, true]")
expected = [True, False, True]
assert_array_equal(re, expected)
re = self.s.run("[true, false, bool()]")
assert_array_equal(re[0:2], [True, False])
self.assertTrue(np.isnan(re[2]))
re = self.s.run("[bool(), bool(), bool()]")
self.assertTrue(np.isnan(re[0]))
self.assertTrue(np.isnan(re[1]))
self.assertTrue(np.isnan(re[2]))
def test_int_vector(self):
re = self.s.run("2938 2920 54938 1999 2333")
self.assertEqual((re == [2938, 2920, 54938, 1999, 2333]).all(), True)
re = self.s.run("[2938, int(), 6552]")
expected = [2938.0, np.nan, 6552.0]
assert_array_almost_equal(re, expected, 1)
re = self.s.run("[int(), int(), int()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_short_vector(self):
re = self.s.run("[10h, 11h, 12h]")
expected = [10, 11, 12]
assert_array_equal(re, expected)
re = self.s.run("[10h, short(), 12h]")
expected = [10.0, np.nan, 12.0]
assert_array_almost_equal(re, expected)
re = self.s.run("[short(), short(), short()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_long_vector(self):
re = self.s.run("[10l, 11l, 12l]")
expected = [10, 11, 12]
assert_array_equal(re, expected)
re = self.s.run("[10l, long(), 12l]")
expected = [10.0, np.nan, 12.0]
assert_array_almost_equal(re, expected)
re = self.s.run("[long(), long(), long()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_double_vector(self):
re = self.s.run("rand(10.0,10)")
self.assertEqual(len(re), 10)
re = self.s.run("[12.5, 26.0, double()]")
expected = [12.5, 26.0, np.nan]
assert_array_almost_equal(re, expected)
re = self.s.run("[double(), double(), double()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_float_vector(self):
re = self.s.run("[12.5f, 26.34f, 25.896f]")
expected = [12.5, 26.34, 25.896]
assert_array_almost_equal(re, expected, 3)
re = self.s.run("[12.5f, float(), 25.896f]")
expected = [12.5, np.nan, 25.896]
assert_array_almost_equal(re, expected, 3)
re = self.s.run("[float(), float(), float()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_date_vector(self):
re = self.s.run("2012.10.01 +1..3")
expected = np.array(['2012-10-02','2012-10-03','2012-10-04'], dtype="datetime64")
self.assertEqual((re == expected).all(), True)
re = self.s.run("[2012.06.01, date(), 2012.06.03]")
expected = np.array(['2012-06-01', 'NaT', '2012-06-03'], dtype="datetime64")
assert_array_equal(re, expected)
re = self.s.run("[date(), date(), date()]")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_month_vector(self):
re = self.s.run("[2012.06M, 2012.07M, 2012.08M]")
expected = [np.datetime64('2012-06'), np.datetime64('2012-07'), np.datetime64('2012-08')]
assert_array_equal(re, expected)
re = self.s.run("[2012.06M, month(), 2012.08M]")
expected = [np.datetime64('2012-06'), np.datetime64('NaT'), np.datetime64('2012-08')]
assert_array_equal(re, expected)
re = self.s.run("take(month(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_time_vector(self):
re = self.s.run("[12:30:10.008, 12:30:10.009, 12:30:10.010]")
expected = [np.datetime64('1970-01-01T12:30:10.008'), np.datetime64('1970-01-01T12:30:10.009'), np.datetime64('1970-01-01T12:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("[12:30:10.008, NULL, 12:30:10.010]")
expected = [np.datetime64('1970-01-01T12:30:10.008'), np.datetime64('NaT'), np.datetime64('1970-01-01T12:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("take(time(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_minute_vector(self):
re = self.s.run("[13:30m, 13:34m, 13:35m]")
expected = [np.datetime64('1970-01-01T13:30'), np.datetime64('1970-01-01T13:34'), np.datetime64('1970-01-01T13:35')]
assert_array_equal(re, expected)
re = self.s.run("[13:30m, minute(), 13:35m]")
expected = [np.datetime64('1970-01-01T13:30'), np.datetime64('NaT'), np.datetime64('1970-01-01T13:35')]
assert_array_equal(re, expected)
re = self.s.run("take(minute(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_second_vector(self):
re = self.s.run("[13:30:10, 13:30:11, 13:30:12]")
expected = [np.datetime64('1970-01-01T13:30:10'), np.datetime64('1970-01-01T13:30:11'), np.datetime64('1970-01-01T13:30:12')]
assert_array_equal(re, expected)
re = self.s.run("[13:30:10, second(), 13:30:12]")
expected = [np.datetime64('1970-01-01T13:30:10'), np.datetime64('NaT'), np.datetime64('1970-01-01T13:30:12')]
assert_array_equal(re, expected)
re = self.s.run("take(second(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_datetime_vector(self):
re = self.s.run("2012.10.01T15:00:04 + 2009..2011")
expected = np.array(['2012-10-01T15:33:33', '2012-10-01T15:33:34', '2012-10-01T15:33:35'], dtype="datetime64")
self.assertEqual((re == expected).all(), True)
re = self.s.run("[2012.06.01T12:30:00, datetime(), 2012.06.02T12:30:00]")
expected = np.array(['2012-06-01T12:30:00', 'NaT', '2012-06-02T12:30:00'], dtype="datetime64")
assert_array_equal(re, expected)
def test_timestamp_vector(self):
re = self.s.run("[2012.06.13T13:30:10.008, 2012.06.13T13:30:10.009, 2012.06.13T13:30:10.010]")
expected = [np.datetime64('2012-06-13T13:30:10.008'), np.datetime64('2012-06-13T13:30:10.009'), np.datetime64('2012-06-13T13:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("[2012.06.13T13:30:10.008, NULL, 2012.06.13T13:30:10.010]")
expected = [np.datetime64('2012-06-13T13:30:10.008'), np.datetime64('NaT'), np.datetime64('2012-06-13T13:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("take(timestamp(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_nanotime_vector(self):
re = self.s.run("[13:30:10.008007006, 13:30:10.008007007, 13:30:10.008007008]")
expected = [np.datetime64('1970-01-01T13:30:10.008007006'), np.datetime64('1970-01-01T13:30:10.008007007'), np.datetime64('1970-01-01T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("[13:30:10.008007006, NULL, 13:30:10.008007008]")
expected = [np.datetime64('1970-01-01T13:30:10.008007006'), np.datetime64('NaT'), np.datetime64('1970-01-01T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("take(nanotime(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_nanotimestamp_vector(self):
re = self.s.run("[2012.06.13T13:30:10.008007006, 2012.06.13T13:30:10.008007007, 2012.06.13T13:30:10.008007008]")
expected = [np.datetime64('2012-06-13T13:30:10.008007006'), np.datetime64('2012-06-13T13:30:10.008007007'), np.datetime64('2012-06-13T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("[2012.06.13T13:30:10.008007006, NULL, 2012.06.13T13:30:10.008007008]")
expected = [np.datetime64('2012-06-13T13:30:10.008007006'), np.datetime64('NaT'), np.datetime64('2012-06-13T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("take(nanotimestamp(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_uuid_vector(self):
re = self.s.run("uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89'])")
expected = ['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89']
assert_array_equal(re, expected)
re = self.s.run("uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '', '5d212a78-cc48-e3b1-4235-b4d91473ee89'])")
expected = ['5d212a78-cc48-e3b1-4235-b4d91473ee87', '00000000-0000-0000-0000-000000000000', '5d212a78-cc48-e3b1-4235-b4d91473ee89']
assert_array_equal(re, expected)
re = self.s.run("uuid(['', '', ''])")
expected = ['00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000']
assert_array_equal(re, expected)
def test_ipaddr_vector(self):
re = self.s.run("ipaddr(['192.168.1.135', '192.168.1.124', '192.168.1.14'])")
expected = ['192.168.1.135', '192.168.1.124', '192.168.1.14']
assert_array_equal(re, expected)
re = self.s.run("ipaddr(['192.168.1.135', '', '192.168.1.14'])")
expected = ['192.168.1.135', '0.0.0.0', '192.168.1.14']
assert_array_equal(re, expected)
re = self.s.run("ipaddr(['', '', ''])")
expected = ['0.0.0.0', '0.0.0.0', '0.0.0.0']
assert_array_equal(re, expected)
def test_int128_vector(self):
re = self.s.run("int128(['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34'])")
expected = ['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34']
assert_array_equal(re, expected)
re = self.s.run("int128(['e1671797c52e15f763380b45e841ec32', '', 'e1671797c52e15f763380b45e841ec34'])")
expected = ['e1671797c52e15f763380b45e841ec32', '00000000000000000000000000000000', 'e1671797c52e15f763380b45e841ec34']
assert_array_equal(re, expected)
re = self.s.run("int128(['', '', ''])")
expected = ['00000000000000000000000000000000', '00000000000000000000000000000000', '00000000000000000000000000000000']
assert_array_equal(re, expected)
def test_int_matrix(self):
re = self.s.run("1..6$3:2")
expected = np.array([[1, 4], [2, 5], [3, 6]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_short_matrix(self):
re = self.s.run("short(1..6)$3:2")
expected = np.array([[1, 4], [2, 5], [3, 6]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_long_matrix(self):
re = self.s.run("long(1..6)$3:2")
expected = np.array([[1, 4], [2, 5], [3, 6]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_double_matrix(self):
re = self.s.run("[1.1, 1.2, 1.3, 1.4, 1.5, 1.6]$3:2")
expected = [[1.1, 1.4], [1.2, 1.5], [1.3, 1.6]]
assert_array_almost_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_float_matrix(self):
re = self.s.run("[1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f]$3:2")
expected = [[1.1, 1.4], [1.2, 1.5], [1.3, 1.6]]
assert_array_almost_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_symbol_matrix(self):
re = self.s.run('symbol("A"+string(1..9))$3:3')
expected = np.array([["A1","A4","A7"], ["A2","A5","A8"], ["A3","A6","A9"]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_huge_matrix(self):
re = self.s.run('matrix(loop(take{, 3000}, 1..3000))')
expected = np.arange(1, 3001)
for i in np.arange(0, 3000):
assert_array_equal(re[0][i], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_one_column_matrix(self):
re = self.s.run('matrix(1..3000000)')
for i in np.arange(0, 3000000):
assert_array_equal(re[0][i], [i+1])
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_one_row_matrix(self):
re = self.s.run("matrix(take(1, 5000)).transpose()")
assert_array_equal(re[0], [np.repeat(1, 5000)])
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_zero_column_matrix(self):
re = self.s.run("matrix(INT, 3, 0)")
expected = [[] for i in range(3)]
assert_array_equal(re[0], expected)
re = self.s.run("matrix(BOOL,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,bool )
re = self.s.run("matrix(CHAR,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'int8')
re = self.s.run("matrix(SHORT,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'int16')
re = self.s.run("matrix(LONG,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'int64')
re = self.s.run("matrix(DATE,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(MONTH,3,0)")
expected = np.empty((3,0),dtype="datetime64[M]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[M]')
re = self.s.run("matrix(TIME,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(MINUTE,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(SECOND,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DATETIME,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(TIMESTAMP,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(NANOTIME,3,0)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(NANOTIMESTAMP,3,0)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(FLOAT,3,0)")
expected = np.empty((3,0),dtype="float32")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DOUBLE,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,"float64")
re = self.s.run("matrix(SYMBOL,3,0)")
assert_array_equal(re[0], expected)
def test_zero_row_matrix(self):
re = self.s.run("matrix(INT, 0, 3)")
expected = np.empty((0,3),dtype="int32")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(BOOL,0,3)")
expected = np.empty((0,3),dtype="bool")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(CHAR,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(SHORT,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(LONG,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DATE,0,3)")
expected = np.empty((0,3),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(MONTH,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(TIME,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(MINUTE,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(SECOND,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DATETIME,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(TIMESTAMP,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(NANOTIME,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(NANOTIMESTAMP,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(FLOAT,0,3)")
expected = np.empty((0,3),dtype="float32")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DOUBLE,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(SYMBOL,0,3)")
assert_array_equal(re[0], expected)
def test_all_null_matrix(self):
re = self.s.run("take(int(), 12)$3:4")
expected=[[np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN]]
assert_array_equal(re[0], expected)
re = self.s.run("[1, 2, NULL, 3, NULL, 4]$2:3")
expected=[[1, np.NaN, np.NaN], [2., 3., 4.]]
assert_array_equal(re[0], expected)
re = self.s.run("symbol(take(string(), 12))$3:4")
assert_array_equal(re[0][0], ['','','',''])
assert_array_equal(re[0][1], ['','','',''])
assert_array_equal(re[0][2], ['','','',''])
re = self.s.run("symbol(['AA', 'BB', NULL, 'CC', NULL, 'DD'])$2:3")
assert_array_equal(re[0][0], ['AA','',''])
assert_array_equal(re[0][1], ['BB','CC','DD'])
def test_huge_symbol_matrix(self):
re = self.s.run("m = symbol(string(1..1000000))$200:5000;m.rename!(1..200,1..5000);m")
assert_array_equal(re[1], np.arange(1, 201))
assert_array_equal(re[2], np.arange(1, 5001))
re = self.s.run("m = symbol(string(1..1000000))$200:5000;m.rename!(1..200,1..5000);table(m.rowNames() as label, m)")
assert_array_equal(re["label"], np.arange(1, 201))
j=1
for i in np.arange(1, 5001):
assert_series_equal(re.iloc[:,i], pd.Series([str(x) for x in np.arange(j, j+200)], index=np.arange(0, 200)),check_names=False)
j+=200
def test_int_matrix_with_label(self):
re = self.s.run("cross(add,1..5,1..10)")
expected = np.array(
[[2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
[5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [6, 7, 8, 9, 10, 11, 12, 13, 14, 15]])
#self.assertEqual((re == expected).all(), True)
assert_array_equal(re[0], expected)
assert_array_equal(re[1], np.array([1, 2, 3, 4, 5]))
assert_array_equal(re[2], np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
def test_matrix_only_with_row_label(self):
re = self.s.run("m=1..6$3:2;m.rename!([0, 1, 2],);m")
expected = [[1, 4], [2, 5], [3, 6]]
assert_array_equal(re[0], expected)
assert_array_equal(re[1], [0, 1, 2])
self.assertIsNone(re[2])
def test_matrix_only_with_col_label(self):
re = self.s.run("m=1..6$3:2;m.rename!([0, 1]);m")
expected = [[1, 4], [2, 5], [3, 6]]
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
assert_array_equal(re[2], [0, 1])
def test_matrix_label_date_symbol(self):
script='''
m=matrix([2200, 1300, 2500, 8800], [6800, 5400, NULL, NULL], [1900, 2100, 3200, NULL]).rename!(2012.01.01..2012.01.04, symbol(`C`IBM`MS));
m
'''
re = self.s.run(script)
expected=[[2200., 6800.,1900.],[1300.,5400.,2100.],[2500.,np.NaN,3200.],[8800.,np.NaN, np.NaN]]
assert_array_almost_equal(re[0], expected)
assert_array_equal(re[1], np.array(['2012-01-01T00:00:00.000000000', '2012-01-02T00:00:00.000000000','2012-01-03T00:00:00.000000000', '2012-01-04T00:00:00.000000000'], dtype="datetime64"))
assert_array_equal(re[2], ['C', 'IBM', 'MS'])
def test_matrix_label_second_symbol(self):
script='''
m=matrix([2200, 1300, 2500, 8800], [6800, 5400, NULL, NULL], [1900, 2100, 3200, NULL]).rename!([09:30:00, 10:00:00, 10:30:00, 11:00:00], `C`IBM`MS)
m
'''
re = self.s.run(script)
expected=[[2200., 6800.,1900.],[1300.,5400.,2100.],[2500.,np.NaN,3200.],[8800.,np.NaN, np.NaN]]
assert_array_almost_equal(re[0], expected)
assert_array_equal(re[1], np.array(['1970-01-01T09:30:00.000000000', '1970-01-01T10:00:00.000000000','1970-01-01T10:30:00.000000000', '1970-01-01T11:00:00.000000000'], dtype="datetime64"))
assert_array_equal(re[2], ['C', 'IBM', 'MS'])
def test_matrix_label_symbol_date(self):
script='''
m=matrix([2200, 1300, 2500, 8800], [6800, 5400, NULL, NULL], [1900, 2100, 3200, NULL]).rename!(`C`IBM`MS`ZZ, 2012.01.01..2012.01.03)
m
'''
re = self.s.run(script)
expected=[[2200., 6800.,1900.],[1300.,5400.,2100.],[2500.,np.NaN,3200.],[8800.,np.NaN, np.NaN]]
assert_array_almost_equal(re[0], expected)
assert_array_equal(re[1], ['C', 'IBM', 'MS', 'ZZ'])
assert_array_equal(re[2], np.array(['2012-01-01T00:00:00.000000000', '2012-01-02T00:00:00.000000000',
'2012-01-03T00:00:00.000000000'],dtype="datetime64"))
def test_table(self):
script = '''n=20;
syms=`IBM`C`MS`MSFT`JPM`ORCL`BIDU`SOHU`GE`EBAY`GOOG`FORD`GS`PEP`USO`GLD`GDX`EEM`FXI`SLV`SINA`BAC`AAPL`PALL`YHOO`KOH`TSLA`CS`CISO`SUN;
mytrades=table(09:30:00+rand(18000,n) as timestamp,rand(syms,n) as sym, 10*(1+rand(100,n)) as qty,5.0+rand(100.0,n) as price);
select qty,price from mytrades where sym==`IBM;'''
re = self.s.run(script)
self.assertEqual(re.shape[1], 2)
def test_dictionary(self):
script = '''dict(1 2 3,`IBM`MSFT`GOOG)'''
re = self.s.run(script)
expected = {2: 'MSFT', 3: 'GOOG', 1: 'IBM'}
self.assertDictEqual(re, expected)
def test_any_vector(self):
re = self.s.run("([1], [2],[1,3, 5],[0.9, 0.8])")
self.assertEqual((re[0] == [1]).all(), True)
self.assertEqual((re[1] == [2]).all(), True)
self.assertEqual((re[2] == [1, 3, 5]).all(), True)
def test_set(self):
re = self.s.run("set(1+3*1..3)")
self.assertSetEqual(re, {10, 4, 7})
def test_pair(self):
re = self.s.run("3:4")
self.assertListEqual(re, list([3, 4]))
def test_any_dictionary(self):
re = self.s.run("{a:1,b:2}")
expected = {'a': 1, 'b': 2}
self.assertDictEqual(re, expected)
def test_upload_matrix(self):
a = self.s.run("cross(+, 1..5, 1..5)")
b = self.s.run("1..25$5:5")
self.s.upload({'a': a, 'b': b})
re = self.s.run('a+b')
# print(re)
# self.assertEqual((re[0] == [3, 9, 15, 21, 27]).all(), True)
# self.assertEqual((re[1] == [5, 11, 17, 23, 29]).all(), True)
# self.assertEqual((re[2] == [7, 13, 19, 25, 31]).all(), True)
# self.assertEqual((re[3] == [9, 15, 21, 27, 33]).all(), True)
# self.assertEqual((re[4] == [11, 17, 23, 29, 35]).all(), True)
def test_run_plot(self):
script = '''
x=1..10
t = table(x as sin, x+100 as cos)
plot(t)
'''
re = self.s.run(script)
assert_array_equal(re['data'][0], np.array([[1, 101], [2, 102], [3, 103], [4, 104], [5, 105], [6, 106], [7, 107], [8, 108], [9, 109], [10, 110]]))
self.assertIsNone(re['data'][1])
assert_array_equal(re['data'][2], np.array(['sin', 'cos']))
assert_array_equal(re['title'], np.array(['', '', '']))
def test_table_datatypes(self):
script='''
n = 200
a = 100
v1 = string(1..n)
v2 = string(1..n)
v3 = take(int128("fcc69bca9885b51962660c23d08c124a"),n-a).join(take(int128("a428d55098d8e41e8adc4b7d04d8ede1"),a))
v4 = take(uuid("407c628e-d319-25c1-17ee-e5a73500a010"),n-a).join(take(uuid("d7a39280-1b18-8f56-160c-beabd428c934"),a))
v5 = take(ipaddr("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"),n-a).join(take(ipaddr("fc00:db20:35b:7399::5"),a))
t = table(n:n,`val1`val2`val3`val4`val5,[SYMBOL,STRING,INT128,UUID,IPADDR])
t[`val1] = v1
t[`val2] = v2
t[`val3] = v3
t[`val4] = v4
t[`val5] = v5
'''
self.s.run(script)
df1 = self.s.run("select val1 from t")
df2 = self.s.run("select val2 from t")
df3 = self.s.run("select val3 from t")
df4 = self.s.run("select val4 from t")
df5 = self.s.run("select val5 from t")
df = self.s.run("select * from t")
n = 200
a = 100
data1 = np.array(range(1,n+1),dtype="str")
data2 = np.append(np.repeat("fcc69bca9885b51962660c23d08c124a",n-a),np.repeat("a428d55098d8e41e8adc4b7d04d8ede1",a))
data3 = np.append(np.repeat("407c628e-d319-25c1-17ee-e5a73500a010",n-a),np.repeat("d7a39280-1b18-8f56-160c-beabd428c934",a))
data4 = np.append(np.repeat("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",n-a),np.repeat("3fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",a))
ex1 = pd.DataFrame({"val1":data1})
ex2 = | pd.DataFrame({"val2":data1}) | pandas.DataFrame |
# Packages are imported.
import pandas as pd
import requests as req
import numpy as np
import datetime as dt
import time
import multiprocessing as mp
import os
import random
import sys
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import statsmodels.stats.multitest as statsmodels
import pickle
np.random.seed(1)
sys.setrecursionlimit(25000)
# Function that requests data. Change IP to external IP of one of the VM's collecting data.
def request_data(job, startTime, endTime):
print('Job request: ' + str(job) + ' started.')
#print(startTime)
#print(job)
#print(endTime)
request = req.get('http://35.246.188.180:30000/api/v1/query_range?query={job=~"' + job + '"}&start=' + startTime + 'Z&end=' + endTime + 'Z&step=1s')
#print(eval(request.content))
metric_data = pd.DataFrame(eval(request.content))
return metric_data
# Change IP to external IP of one of the VM's collecting data.
def get_jobs():
# Cluster: Get list of jobs
x = req.get('http://172.16.58.3:30000/api/v1/label/job/values')
jobList = eval(x.content)['data']
return jobList
# Get timestamps for next 15mins.
def get_timestamps(date):
scrape_timeframes = []
interval = dt.timedelta(minutes=14, seconds=59)
scrape_timeframes.append([str(date).replace(" ", "T"), str(date + interval).replace(" ", "T"), pd.date_range(date, (date + interval), freq='1S')])
return scrape_timeframes
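# Small usage sketch of get_timestamps (the date below is illustrative): it returns a single
# 15-minute window as [start_iso, end_iso, per-second DatetimeIndex], ready to feed request_data.
_example_window = get_timestamps(dt.datetime(2021, 5, 1, 11, 0, 0))
# _example_window[0][0] -> '2021-05-01T11:00:00', _example_window[0][1] -> '2021-05-01T11:14:59'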
# Info of features is saved in separate files.
def save_feature_info(job, dfList_info):
dfFeatures_info = pd.concat(dfList_info, axis=1)
dfFeatures_info.columns = [str(column) + dfFeatures_info.columns[column] for column in range(0, len(dfFeatures_info.columns))]
list_features_info = [dirs for dirs in os.listdir('data_scraped/feature_info/') if job in dirs]
file_number = len(list_features_info) + 1
dfFeatures_info.to_feather('data_scraped/feature_info/features_' + job + str(file_number) + '.ftr')
# Function to load in data and structure it into one dataframe.
def structureData(job, date):
scrape_timeframes = get_timestamps(date)
features_dict = {'Timestamp' : scrape_timeframes[0][2]}
dfMetric = request_data(job, scrape_timeframes[0][0], scrape_timeframes[0][1])
dfFeatures = pd.DataFrame(features_dict)
print(job + ' metrics: ' + str(len(dfMetric['data'][0])))
dfList = [pd.DataFrame(metric['values'], columns=['Timestamp', '#'.join(list(metric['metric'].values()))]) for metric in dfMetric['data'][0]]
dfList_info = [pd.DataFrame(metric['metric'].keys(), columns=['#'.join(list(metric['metric'].keys()))]) for metric in dfMetric['data'][0]]
dfList.insert(0, dfFeatures)
for df in dfList:
if len(df.columns) > 1:
df['Timestamp'] = pd.to_datetime(df['Timestamp'], unit='s')
else:
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
dfList = [df.set_index('Timestamp', drop=True) for df in dfList]
save_feature_info(job, dfList_info)
dfFeatures = pd.concat(dfList, axis=1)
print('Done: ' + job)
return dfFeatures
# Function that saves the data as feather files.
def save_data(job, date):
tic = time.perf_counter()  # time.clock() was removed in Python 3.8
df = structureData(job, date)
file_number = len(os.listdir('data_scraped/' + job + '/')) + 1
df.reset_index(inplace=True)
df.to_feather('data_scraped/' + job + '/' + job + str(file_number - 1) + '.ftr')
toc = time.perf_counter()
print(job + ': ' + str(toc-tic))
return df
# For each separate job, data is requested in a timeframe of 15mins, structured, and then saved.
job_list = ['envoy-stats', 'istiod', 'kube-state-metrics', 'kubernetes-apiservers', 'kubernetes-cadvisor', 'kubernetes-nodes', 'kubernetes-pods', 'kubernetes-service-endpoints', 'litmuschaos', 'skydive']
def scrape_data(job_list):
dfDates = pd.date_range('11:00', '15:45', freq='15min')
interval = dt.timedelta(minutes=15, seconds=1)
now = dt.datetime.now()
for date in dfDates:
time_passed = False
while time_passed is False:
if now > (date + interval):
time_passed = True
for job in job_list:
df = save_data(job, date)
time.sleep(5)
else:
print('Too early')
time.sleep(60)
now = dt.datetime.now()
# The function to collect data is run.
scrape_data(job_list)
# The collected data is filtered. Litmus related and constant features are removed along with features containing too many missing values.
# Litmus related features are removed.
def filter_chaos(dataset):
filter_keywords_list = ['litmus', 'litmuschaos', 'chaos', 'chaos-runner', 'hog', 'iostress', 'helper']
column_list = []
iteration_list = []
final_feature_list = []
checked_shape = False
dataframes_list = []
df = pd.read_feather(dataset)
for column in df.columns:
if any(keyword in column for keyword in filter_keywords_list) == False:
iteration_list.append(column)
return iteration_list
# Function to convert datatypes to numeric.
def convert_datatype(column_data):
return column_data[0][column_data[1]].apply(pd.to_numeric, errors='coerce')
# Parallelization function.
def modifyParallelized(function, df):
column_list = [[df, column] for column in df.columns]
# All the available cores are used.
cores=mp.cpu_count()
# Create the multiprocessing pool of cores.
pool = mp.Pool(cores)
columns_converted = pool.map(function, column_list)
# Close down the pool and join.
pool.close()
pool.join()
#pool.clear()
return columns_converted
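# Minimal usage sketch of the helpers above (assumes a scraped feather file exists at the
# illustrative path below). filter_chaos drops Litmus-related columns, convert_datatype
# coerces one column to numeric, and modifyParallelized fans that out over all CPU cores.
_sample_path = 'data_scraped/skydive/skydive1.ftr'
_sample_df = pd.read_feather(_sample_path)
_sample_df = _sample_df[filter_chaos(_sample_path)]
_numeric_cols = [c for c in _sample_df.columns if 'Timestamp' not in c]
_sample_df[_numeric_cols] = pd.concat(modifyParallelized(convert_datatype, _sample_df[_numeric_cols]), axis=1)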
# Scraped data for jobs are read, combined, filtered, and saved to pickle files.
job_list = ['envoy-stats', 'istiod', 'kube-state-metrics', 'kubernetes-apiservers', 'kubernetes-cadvisor', 'kubernetes-nodes', 'kubernetes-pods', 'kubernetes-service-endpoints', 'litmuschaos', 'skydive']
filter_keywords_list = ['litmus', 'litmuschaos', 'chaos', 'chaos-runner', 'hog', 'iostress', 'helper']
for job in job_list:
if os.path.isfile('data_combined_filtered/' + job + '/' + job + '_filtered.pkl') == True:
continue
column_list = []
iteration_list = []
final_feature_list = []
dataframes_list = []
# Paths for each file are generated.
dataset_dirs = ['data_scraped/' + job + '/' + file for file in os.listdir('data_scraped/' + job) if job in file]
dataset_dirs.sort()
print('Start Job: ' + job)
df = pd.read_feather(dataset_dirs[0])
print(df.shape)
# Any non-litmus job is filtered on litmus related features.
if job != 'litmuschaos':
column_list = filter_chaos(dataset_dirs[0])
df = df[column_list]
# NA's are dropped, features having more than 5 NA's are dropped.
df.dropna(axis=1, inplace=True, thresh=(len(df) - 5))
print(df.shape)
column_list = [column for column in df.columns if "Timestamp" not in column]
df[column_list] = pd.concat(modifyParallelized(convert_datatype, df[column_list]), axis=1)
print(df.shape)
df.dropna(axis=1, inplace=True, thresh=(len(df) - 5))
column_list = df.columns
df.set_index('Timestamp', drop=True, inplace=True)
# All datasets are merged into one dataset.
for dataset in dataset_dirs[1:]:
print(dataset)
df_concat = pd.read_feather(dataset).set_index('Timestamp', drop=True)
concat_columns = list(set(column_list).intersection(df_concat.columns))
df_concat = pd.concat(modifyParallelized(convert_datatype, df_concat[concat_columns]), axis=1)
df = pd.concat([df, df_concat[concat_columns]], axis=0)
time.sleep(2)
print(df.shape)
# For litmuschaos, only features showing which experiment is running are kept.
if job == 'litmuschaos':
df.dropna(axis=0, inplace=True)
df = df[[column for column in df.columns if 'awaited_experiments' in column]]
# Final filters are executed.
else:
column_list = filter_chaos(dataset_dirs[0])
df.reset_index(drop=False, inplace=True)
df.dropna(axis=1, inplace=True, thresh=(len(df) - 5))
df.dropna(axis=0, inplace=True)
df = df.loc[:, (df != df.iloc[0]).any()]
print(df.shape)
df.to_pickle('data_combined_filtered/' + job + '/' + job + '_filtered.pkl')
# Feature summary of all jobs.
for job in job_list:
df = | pd.read_pickle('data_combined_filtered/' + job + '/' + job + '_filtered.pkl') | pandas.read_pickle |
# -*- coding: utf-8 -*-
from __future__ import print_function
from numpy import nan
import numpy as np
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameConvertTo(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_to_dict(self):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
recons_data = DataFrame(test_data).to_dict()
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp")
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [nan, '3']]}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r")
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': nan, 'B': '3'}]
tm.assertIsInstance(recons_data, list)
self.assertEqual(len(recons_data), 3)
for l, r in zip(recons_data, expected_records):
tm.assert_dict_equal(l, r)
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k2][k])
def test_to_dict_timestamp(self):
# GH11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp('20130101')
test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
expected_records = [{'A': tsmp, 'B': tsmp},
{'A': tsmp, 'B': tsmp}]
expected_records_mixed = [{'A': tsmp, 'B': 1},
{'A': tsmp, 'B': 2}]
self.assertEqual(test_data.to_dict(orient='records'),
expected_records)
self.assertEqual(test_data_mixed.to_dict(orient='records'),
expected_records_mixed)
expected_series = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([tsmp, tsmp], name='B'),
}
expected_series_mixed = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([1, 2], name='B'),
}
tm.assert_dict_equal(test_data.to_dict(orient='series'),
expected_series)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),
expected_series_mixed)
expected_split = {
'index': [0, 1],
'data': [[tsmp, tsmp],
[tsmp, tsmp]],
'columns': ['A', 'B']
}
expected_split_mixed = {
'index': [0, 1],
'data': [[tsmp, 1],
[tsmp, 2]],
'columns': ['A', 'B']
}
tm.assert_dict_equal(test_data.to_dict(orient='split'),
expected_split)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),
expected_split_mixed)
def test_to_dict_invalid_orient(self):
df = DataFrame({'A': [0, 1]})
self.assertRaises(ValueError, df.to_dict, orient='xinvalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
self.assertEqual(df.to_records()['index'][0], df.index[0])
rs = df.to_records(convert_datetime64=False)
self.assertEqual(rs['index'][0], df.index.values[0])
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
self.assertTrue('bar' in r)
self.assertTrue('one' not in r)
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
import collections
collections.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <<EMAIL>>\n'
'To: <<EMAIL>>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = DataFrame.from_records([headers])
all(x in frame for x in ['Type', 'Subject', 'From'])
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = 'X'
rs = df.to_records()
self.assertIn('X', rs.dtype.fields)
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
self.assertIn('index', rs.dtype.fields)
df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df.index.names = ['A', None]
rs = df.to_records()
self.assertIn('level_0', rs.dtype.fields)
def test_to_records_with_unicode_index(self):
# GH13172
# unicode_literals conflict with to_records
result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a')\
.to_records()
expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')])
| tm.assert_almost_equal(result, expected) | pandas.util.testing.assert_almost_equal |
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import accuracy_score
from config import const
def cross_validation_score(model, X, y, metrics=accuracy_score, n_splits=5, random_state=0, stratified=True, shuffle=True):
oof_train = cross_validation_predict(model, X, y, metrics=metrics, n_splits=n_splits, random_state=random_state, stratified=stratified, shuffle=shuffle)
y_pred_oof = np.argmax(oof_train, axis=1)
return metrics(y, y_pred_oof)
def cross_validation_predict(model, X, y, X_test=None, metrics=accuracy_score, n_splits=5, random_state=0, stratified=True, shuffle=True):
oof_train = np.zeros((len(y), const.n_class))
y_preds = []
X = X.copy()
y = y.copy()
if X_test is not None:
X_test = X_test.copy()
if stratified:
kfold = StratifiedKFold(
n_splits=n_splits, random_state=random_state, shuffle=shuffle)
for fold_id, (train_index, valid_index) in enumerate(kfold.split(X, y)):
X_train = X.iloc[train_index, :]
X_valid = X.iloc[valid_index, :]
y_train = y.iloc[train_index]
y_valid = y.iloc[valid_index]
model.fit(X_train, y_train)
oof_train[valid_index] = model.predict(X_valid)
if X_test is not None:
y_preds.append(model.predict(X_test))
else:
kfold = KFold(n_splits, random_state=random_state, shuffle=shuffle)
for fold_id, (train_index, valid_index) in enumerate(kfold.split(X)):
X_train = X.iloc[train_index, :]
X_valid = X.iloc[valid_index, :]
y_train = y.iloc[train_index]
y_valid = y.iloc[valid_index]
model.fit(X_train, y_train)
oof_train[valid_index] = model.predict(X_valid)
if X_test is not None:
y_preds.append(model.predict(X_test))
if X_test is None:
return oof_train
else:
return oof_train, sum(y_preds) / len(y_preds)
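# Minimal sketch of what the helpers above expect from `model`: predict() must return an
# (n_samples, const.n_class) array (e.g. class probabilities), not hard labels. The adapter
# below is a hypothetical example built on scikit-learn (it assumes every class appears in
# each training fold so the probability matrix has const.n_class columns); X and y should be
# pandas objects.
from sklearn.linear_model import LogisticRegression

class ProbaModel:
    """Hypothetical adapter whose predict() returns per-class probabilities."""
    def __init__(self):
        self.clf = LogisticRegression(max_iter=1000)

    def fit(self, X, y):
        self.clf.fit(X, y)

    def predict(self, X):
        return self.clf.predict_proba(X)

# oof = cross_validation_predict(ProbaModel(), X_train_df, y_train_sr)  # X_train_df / y_train_sr: hypothetical pandas data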
def stacking(models, X_train, y_train, X_test, n_round=10):
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_preds = []
oofs = []
for i in range(n_round):
y_preds = []
for j, model in enumerate(models):
print("{}th {}".format(i, str(model)))
columns = []
for k in range(const.n_class):
columns.append("{}-round-{}th-{}-{}th-class".format(i,
j, str(model), k))
oof_train, y_pred = cross_validation_predict(
model, X_train, y_train, X_test)
X_append_train = pd.DataFrame(oof_train, columns=columns)
X_append_test = | pd.DataFrame(y_pred, columns=columns) | pandas.DataFrame |
#!/usr/bin/python3
import datetime
import numpy as np
import pandas as pd
import re
import requests
from lxml import etree
from dev_global.env import TIME_FMT
from polaris.mysql8 import (mysqlBase, mysqlHeader)
from jupiter.utils import trans
__version__ = '1.0.10'
class StockBase(object):
"""
param header: mysqlHeader
"""
def __init__(self, header):
self._Today = datetime.date.today().strftime(TIME_FMT)
self.today = datetime.date.today().strftime('%Y%m%d')
if not isinstance(header, mysqlHeader):
raise HeaderException()
self.mysql = mysqlBase(header)
@property
def Today(self):
self._Today = datetime.date.today().strftime(TIME_FMT)
return self._Today
class HeaderException(BaseException):
def __str__(self) -> str:
return "Error occurs due to mysql header."
class StockEventBase(object):
"""
Today: date format like yyyy-mm-dd \n
today: date format like yyyymmdd
"""
def __init__(self, header):
self.Today = datetime.date.today().strftime(TIME_FMT)
self.today = datetime.date.today().strftime('%Y%m%d')
if not header:
raise Exception
self.mysql = mysqlBase(header)
self.stock_list = []
self.coder = StockCodeFormat()
def __str__(self):
return "<Stock Event Base>"
def data_clean(self, df):
for index, col in df.iteritems():
try:
if re.search('date', index):
df[index] = pd.to_datetime(df[index])
elif re.search('int', index):
df[index] = pd.to_numeric(df[index])
elif re.search('float', index):
df[index] = pd.to_numeric(df[index])
elif re.search('char', index):
pass
else:
pass
except Exception:
ERROR(
f"Error while record interest of {col['char_stock_code']}")
return df
def update_date_time(self):
"""
Get date of today.
"""
self.Today = datetime.date.today().strftime(TIME_FMT)
def get_all_stock_list(self):
"""
Return stock code --> list.
"""
query = self.mysql.condition_select(
"stock_manager", "stock_code", "flag='t'"
)
df = | pd.DataFrame.from_dict(query) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# coding: utf-8
##### HELPER FUNCTIONS #####
# import libraries
import numpy as np
import pandas as pd
from datetime import datetime,date
from device_detector import DeviceDetector
import re
### CLEANING
def drop_rows(df):
print('Starting dropping rows...')
# keep rows where exclude hit is == 0
df = df[df['exclude_hit'] == 0]
# keep rows where hit source != 5, 7, 8 or 9
df = df[~df['hit_source'].isin([5, 7, 8, 9])]
# keep rows where visitor id is not missing (6 missing values)
df = df[pd.notnull(df['visitor_id'])]
# clean visit page num and keep rows where visit page num is not missing or faulty (118 missing and 269 faulty values)
df['visit_page_num'] = df['visit_page_num'].apply(lambda x: np.nan if len(str(x)) > 10 else x)
df = df[pd.notnull(df['visit_page_num'])]
print('Dropping rows complete.')
return df
def drop_columns(df):
print('Starting dropping columns...')
# select columns to keep
columns_to_keep = ['visitor_id',
'visit_start_time_gmt',
'hit_time_gmt',
'date_time',
# numerical columns
'visit_num',
'visit_page_num',
'purchase_boolean',
'product_view_boolean',
'checkout_boolean',
'cart_addition_boolean',
'cart_removal_boolean',
'cart_view_boolean',
'campaign_view_boolean',
'cart_value',
'page_view_boolean',
'last_purchase_num',
'standard_search_results_clicked',
'standard_search_started',
'suggested_search_results_clicked',
# categorical columns
'country',
'cookies',
'persistent_cookie',
'search_page_num',
'connection_type',
'search_engine',
'marketing_channel',
'referrer_type',
'new_visit',
'hourly_visitor',
'daily_visitor',
'weekly_visitor',
'monthly_visitor',
'quarterly_visitor',
'yearly_visitor',
'product_items',
'product_item_price',
'product_categories',
'device_type_user_agent',
'device_brand_name_user_agent',
'device_operating_system_user_agent',
'device_browser_user_agent',
'repeat_orders',
'net_promoter_score',
'hit_of_logged_in_user',
'registered_user',
'user_gender',
'user_age',
'visit_during_tv_spot']
# subset dataframe to select only columns to keep
df = df[columns_to_keep]
print('Dropping columns complete.')
return df
def rename_columns(df):
print('Starting renaming columns...')
df.rename(columns={'va_closer_id' : 'marketing_channel'}, inplace=True)
df.rename(columns={'os' : 'operating_system'}, inplace=True)
df.rename(columns={'ref_type' : 'referrer_type'}, inplace=True)
df.rename(columns={'post_search_engine' : 'search_engine'}, inplace=True)
df.rename(columns={'cart_value_(v50)' : 'cart_value'}, inplace=True)
df.rename(columns={'int._stand._search_result_clicked_(e16)' : 'standard_search_results_clicked'}, inplace=True)
df.rename(columns={'active_stand._search_started_(e17)' : 'standard_search_started'}, inplace=True)
df.rename(columns={'sugg._search_result_clicked_(e18)' : 'suggested_search_results_clicked'}, inplace=True)
df.rename(columns={'post_cookies' : 'cookies'}, inplace=True)
df.rename(columns={'post_persistent_cookie' : 'persistent_cookie'}, inplace=True)
df.rename(columns={'repeat_orders_(e9)' : 'repeat_orders'}, inplace=True)
df.rename(columns={'net_promoter_score_raw_(v10)_-_user' : 'net_promoter_score'}, inplace=True)
df.rename(columns={'hit_of_logged_in_user_(e23)' : 'hit_of_logged_in_user'}, inplace=True)
df.rename(columns={'registered_user_(user)_(v34)' : 'registered_user'}, inplace=True)
df.rename(columns={'user_gender_(v61)' : 'user_gender'}, inplace=True)
df.rename(columns={'user_age_(v62)' : 'user_age'}, inplace=True)
df.rename(columns={'visit_during_tv_spot_(e71)' : 'visit_during_tv_spot'}, inplace=True)
print('Renaming columns complete')
return df
def fill_missing_and_faulty_values(df):
print('Starting filling missing and faulty values...')
df['cart_value'].fillna(0, inplace=True)
df['registered_user'] = df['registered_user'].apply(lambda x: 1 if x == 'y' else 0)
df['cookies'] = df['cookies'].apply(lambda x: 1 if x == 'Y' else 0)
df['persistent_cookie'] = df['persistent_cookie'].apply(lambda x: 1 if x == 'Y' else 0)
print('Filling missing and faulty values complete.')
return df
def cast_data_types(df):
print('Starting casting data types...')
# datetime columns
df['date_time'] = df['date_time'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
df['hit_time_gmt'] = pd.to_datetime(df['hit_time_gmt'], unit='s')
df['visit_start_time_gmt'] = pd.to_datetime(df['visit_start_time_gmt'], unit='s')
# integer columns
integer_columns = ['visit_num',
'visit_page_num',
'purchase_boolean',
'product_view_boolean',
'checkout_boolean',
'cart_addition_boolean',
'cart_removal_boolean',
'cart_view_boolean',
'campaign_view_boolean',
'page_view_boolean',
'last_purchase_num',
'standard_search_results_clicked',
'standard_search_started',
'suggested_search_results_clicked',
'cookies',
'persistent_cookie',
'search_page_num',
'new_visit',
'hourly_visitor',
'daily_visitor',
'weekly_visitor',
'monthly_visitor',
'quarterly_visitor',
'yearly_visitor',
'repeat_orders',
'hit_of_logged_in_user',
'registered_user',
'visit_during_tv_spot']
for column in integer_columns:
df[column] = df[column].apply(lambda x: int(float(x)))
# float column
df['cart_value'] = df['cart_value'].apply(lambda x: float(x))
print('Casting data types complete.')
return df
### MAPPING
def connection_type_mapping(df):
print('Starting connection type mapping...')
# load file for connection type mapping and select columns
connection_type_mapping = pd.read_csv('../data/mapping_files/connection_type.tsv', sep='\t', header=None)
connection_type_mapping.columns = ['connection_type_id', 'connection_type_name']
# create dictionary for connection type mapping
connection_type_mapping_dict = dict(zip(connection_type_mapping.connection_type_id, connection_type_mapping.connection_type_name))
# map connection types
df['connection_type'] = df['connection_type'].map(connection_type_mapping_dict).fillna(df['connection_type'])
print('Connection type mapping complete.')
return df
def country_mapping(df):
print('Starting country mapping...')
# load file for country mapping and select columns
country_mapping = pd.read_csv('../data/mapping_files/country.tsv', sep='\t', header=None)
country_mapping.columns = ['country_id', 'country_name']
# drop dupliate countries
country_mapping = country_mapping.drop_duplicates('country_name').reset_index(drop=True)
# create dictionary for country mapping
country_mapping_dict = dict(zip(country_mapping.country_id, country_mapping.country_name))
# map countries
df['country'] = df['country'].map(country_mapping_dict).fillna(df['country'])
print('Country mapping complete.')
return df
def custom_evars_mapping(df):
print('Starting custom evars mapping...')
# load file for custom evars mapping and select columns
evars = pd.read_csv('../data/mapping_files/custom_evars.tsv', sep='\t')
evars_mapping = evars[['id', 'name']]
# map custom evars
evar_cols = [x for x in df.columns if x.lower()[:9] == 'post_evar']
evar_cols = [x.replace('post_', '') for x in evar_cols]
evars_mapped = evars[evars['id'].isin(evar_cols)][['id', 'name']]
evars_mapped['id'] = evars_mapped['id'].apply(lambda x: 'post_' + x)
evars_mapped = evars_mapped.reset_index(drop=True)
# rename custom evars
for i in range(evars_mapped.shape[0]):
df.rename(columns={evars_mapped.iloc[i,0] : str.lower(evars_mapped.iloc[i,1]).replace(' ','_')}, inplace=True)
print('Custom evars mapping complete.')
return df
def custom_marketing_channel_mapping(df):
print('Starting custom marketing channel mapping...')
# load file for custom marketing channel mapping
custom_marketing_channel_mapping = pd.read_csv('../data/mapping_files/custom_marketing_channels.tsv', sep='\t')
# create dictionary for marketing channel mapping
custom_marketing_channel_mapping_dict = dict(zip(custom_marketing_channel_mapping.channel_id, custom_marketing_channel_mapping.name))
# map custom marketing channels
df['va_closer_id'] = df['va_closer_id'].apply(lambda x: float(x))
df['va_closer_id'] = df['va_closer_id'].map(custom_marketing_channel_mapping_dict).fillna(df['va_closer_id'])
df['va_closer_id'] = df['va_closer_id'].apply(lambda x: 'Unknown' if x == 0 else x)
print('Custom marketing channel mapping complete.')
return df
def custom_and_standard_events_mapping(df):
print('Starting custom and standard events mapping...')
# fill missing values in post event list
df['post_event_list'] = df['post_event_list'].fillna('Unknown')
# load file for standard event mapping and select columns
standard_events = pd.read_csv('../data/mapping_files/event.tsv', sep='\t', header=None)
standard_events.columns = ['event_id', 'event_name']
# load file for custom event mapping and modify event id for matching
custom_events = pd.read_csv('../data/mapping_files/custom_events.tsv', sep='\t')
custom_events['event_id'] = custom_events.index + 200
# map standard and custom events
events = pd.merge(standard_events, custom_events, how='inner', on='event_id')
events_mapping = events[['event_id', 'name']]
events_mapping = events_mapping.reset_index(drop=True)
# create event dummies
for id, event in zip(events_mapping.iloc[:,0], events_mapping.iloc[:,1]):
df[str.lower(event).replace(' ','_')] = df['post_event_list'].apply(lambda x: 1 if ','+str(id)+',' in x else 0)
# drop internal users
df = df[df['internal_user_(e30)'] != 1]
print('Standard and custom events mapping complete.')
return df
def referrer_type_mapping(df):
print('Starting referrer type mapping...')
# load file for referrer type mapping and select columns
referrer_type_mapping = pd.read_csv('../data/mapping_files/referrer_type.tsv', sep='\t', header=None)
referrer_type_mapping.columns = ['referrer_type_id', 'referrer_type_name', 'referrer_type']
# create dictionary for referrer type mapping
referrer_type_mapping_dict = dict(zip(referrer_type_mapping.referrer_type_id, referrer_type_mapping.referrer_type))
# map referrer types
df['ref_type'] = df['ref_type'].map(referrer_type_mapping_dict).fillna(df['ref_type'])
print('Referrer type mapping complete.')
return df
def search_engine_mapping(df):
print('Starting search engine mapping...')
# load file for search engine mapping and select columns
search_engine_mapping = pd.read_csv('../data/mapping_files/search_engines.tsv', sep='\t', header=None)
search_engine_mapping.columns = ['search_engine_id', 'search_engine_name']
# create dictionary for search engine mapping
search_engine_mapping_dict = dict(zip(search_engine_mapping.search_engine_id, search_engine_mapping.search_engine_name))
# map search engines
df['post_search_engine'] = df['post_search_engine'].map(search_engine_mapping_dict).fillna(df['post_search_engine'])
# clean search_engine and keep only general search engine name
df['post_search_engine'] = df['post_search_engine'].apply(lambda x: str(x).split(' ')[0] if pd.notnull(x) else x)
df['post_search_engine'] = df['post_search_engine'].apply(lambda x: 'Google' if x == 'googleadservices.com' else x)
df['post_search_engine'] = df['post_search_engine'].apply(lambda x: 'Unknown' if x == '0.0' else x)
print('Search engine mapping complete.')
return df
def generate_user_agent_mapping(df):
print('Starting user agent device type, brand name, operating system and bot flag mapping...')
# fill missing values
df['user_agent'] = df['user_agent'].fillna('Unknown')
# create dataframe for user agent mapping and fill with unique user agents
columns = ['user_agent',
'device_type_user_agent',
'device_brand_name_user_agent',
'device_operating_system_user_agent',
'device_browser_user_agent',
'device_is_bot_user_agent']
index = np.arange(df['user_agent'].nunique())
user_agent_mapping_df = pd.DataFrame(index=index, columns=columns)
user_agent_mapping_df['user_agent'] = df['user_agent'].unique()
# map device type
user_agent_mapping_df['device_type_user_agent'] = user_agent_mapping_df['user_agent'].apply(lambda x: DeviceDetector(x).parse().device_type())
user_agent_mapping_df['device_type_user_agent'] = user_agent_mapping_df['device_type_user_agent'].apply(lambda x: 'Unknown' if x == '' else x)
# map brand name
user_agent_mapping_df['device_brand_name_user_agent'] = user_agent_mapping_df['user_agent'].apply(lambda x: DeviceDetector(x).parse().device_brand_name())
user_agent_mapping_df['device_brand_name_user_agent'] = user_agent_mapping_df['device_brand_name_user_agent'].apply(lambda x: 'Unknown' if x == 'UNK' else x)
# map operating system
user_agent_mapping_df['device_operating_system_user_agent'] = user_agent_mapping_df['user_agent'].apply(lambda x: DeviceDetector(x).parse().os_name())
user_agent_mapping_df['device_operating_system_user_agent'] = user_agent_mapping_df['device_operating_system_user_agent'].apply(lambda x: 'Unknown' if x == '' else x)
# map browser
user_agent_mapping_df['device_browser_user_agent'] = user_agent_mapping_df['user_agent'].apply(lambda x: DeviceDetector(x).parse().client_name())
# map bot flag
user_agent_mapping_df['device_is_bot_user_agent'] = user_agent_mapping_df['user_agent'].apply(lambda x: DeviceDetector(x).parse().is_bot())
user_agent_mapping_df['device_is_bot_user_agent'] = user_agent_mapping_df['device_is_bot_user_agent'].apply(lambda x: 1 if x == True else 0)
print('User agent device type, brand name, operating system, browser and bot flag mapping complete.')
return user_agent_mapping_df
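# Tiny sketch of the user-agent parsing used above (the UA string is illustrative).
_ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36'
_parsed = DeviceDetector(_ua).parse()
# _parsed.device_type(), _parsed.device_brand_name(), _parsed.os_name(), _parsed.client_name(), _parsed.is_bot()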
def user_agent_mapping(df):
print('Starting user agent mapping...')
# fill missing values
df['user_agent'] = df['user_agent'].fillna('Unknown')
# load file for user agent mapping
user_agent_mapping = pd.read_pickle('../data/mapping_files/user_agent_mapping.pkl.gz')
# merge user agent mapping and df
df = pd.merge(df, user_agent_mapping, how='left', on='user_agent')
# drop rows where device_is_bot_user_agent == 1
df = df.drop(df[df['device_is_bot_user_agent'] == 1].index)
# fill missing values in user agent columns (device_type_user_agent 51, device_brand_name_user_agent 51, device_operating_system_user_agent 51 and device_browser_user_agent 842 missing rows)
device_type_columns = [x for x in df.columns if x.lower()[:6] == 'device']
for i in device_type_columns:
df[i] = df[i].apply(lambda x: 'Unknown' if pd.isnull(x) else x)
print('User agent mapping complete.')
return df
### PROCESSING
def process_product_items(df):
print('Starting processing product items...')
# sum of product items per hit
df['product_items'] = df['product_items'].apply(lambda x: len([x for x in x.split(';') if x]) if pd.notnull(x) else 0)
print('Processing product items complete.')
return df
def process_product_item_prices(df):
print('Starting processing product item prices...')
# sum of product items prices per hit
df['product_item_price'] = df['product_item_price'].apply(lambda x: sum([float(x) for x in x.split(';') if x]) if (pd.notnull(x)) & (x != 'product_item_price') else 0)
print('Processing product item prices complete.')
return df
def process_product_categories(df):
print('Starting processing product categories...')
# select level 1 product categories
product_categories_level_1 = ['Computer & Elektronik',
'Wohnen & Haushalt',
'Schönheit & Gesundheit',
'Baumarkt & Garten',
'Baby & Spielzeug',
'Sport & Freizeit',
'Mode & Accessoires',
'Lebensmittel & Getränke',
'Medien & Unterhaltung']
# clean product categories and keep only level 1 product categories
df['product_categories_first'] = df['product_categories_first'].apply(lambda x: 'Unknown' if pd.isnull(x) else x.split('/'))
df['product_categories_first'] = df['product_categories_first'].apply(lambda x: x if x == 'Unknown' else [x.strip() for x in x][0])
df['product_categories_first_level_1'] = df['product_categories_first'].apply(lambda x: x if x in product_categories_level_1 else 'Unknown')
df['product_categories_first_level_1'] = df['product_categories_first_level_1'].apply(lambda x: 'Schoenheit & Gesundheit' if x == 'Schönheit & Gesundheit' else x)
df['product_categories_first_level_1'] = df['product_categories_first_level_1'].apply(lambda x: 'Lebensmittel & Getraenke' if x == 'Lebensmittel & Getränke' else x)
df.drop('product_categories_first', axis=1, inplace=True)
print('Processing product categories complete.')
return df
def process_net_promoter_score(df):
print('Starting processing net promoter score...')
# clean net promoter score and fill missing values
df['net_promoter_score_first'] = df['net_promoter_score_first'].apply(lambda x: 'Unknown' if pd.isnull(x) else ('8' if x == '8th' else str(int(x))))
print('Processing net promoter score complete.')
return df
def process_user_gender(df):
print('Starting processing user gender...')
# clean user gender and fill missing values
df['user_gender_first'] = df['user_gender_first'].apply(lambda x: 'Unknown' if | pd.isnull(x) | pandas.isnull |
from __future__ import print_function
import sys
import pandas as pd
import numpy.matlib
import numpy as np
import scipy
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sklearn
from sklearn import preprocessing
from sklearn import mixture
from sklearn.neighbors.kde import KernelDensity
import glob
import seaborn as sns
import collections
sns.set_context('talk')
sns.set_style('white')
sns.set_style("ticks")
import re
from scipy import sparse, io
import os
import math
import csv
import fbpca
from matplotlib import rcParams
import numpy as np
import scipy.stats as stats
from scipy.stats import gaussian_kde
import statsmodels.api as sm
import statsmodels
from statsmodels.distributions.empirical_distribution import ECDF
#GO imports
#from goatools.obo_parser import GODag
#from goatools.associations import read_ncbi_gene2go
#from goatools.go_enrichment import GOEnrichmentStudy
# The following packages are typically not installed by default in Python installations, but would enable some additional functionality
#import Levenshtein (edit_dist)
#import infomap (info_cluster)
#import networkx as nx (info_cluster)
## progress bar
def update_progress(progress):
barLength = 10 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
#######read input############
def read_10x(pathin):
"""Return Pandas Dataframe containing 10x dataset """
mat=scipy.io.mmread(os.path.join(pathin, "matrix.mtx"))
genes_path = os.path.join(pathin, "genes.tsv")
gene_ids = [row[0] for row in csv.reader(open(genes_path), delimiter="\t")]
gene_names = [row[1] for row in csv.reader(open(genes_path), delimiter="\t")]
gene_final = [x+'_'+y for x,y in zip(gene_ids,gene_names)]
barcodes_path = os.path.join(pathin, "barcodes.tsv")
barcodes = [row[0][0:14] for row in csv.reader(open(barcodes_path), delimiter="\t")]
DGE=pd.DataFrame(mat.toarray())
DGE.index=gene_final
DGE.columns=barcodes
return DGE
def genenames_from10x(genelist):
"""Return gene names from 10x index generated with read_10x """
genesymbol=[]
#ensemblid=[]
for i in range(len(genelist)):
curgene=genelist[i]
starts=[]
for x in re.finditer('_',curgene):
starts.append(x.start()+1)
genesymbol.append(curgene[starts[-1]:])
return genesymbol#,ensemblid
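# Tiny sketch of genenames_from10x on the ENSEMBLID_SYMBOL style index built by read_10x
# (the two entries below are illustrative).
_toy_index = ['ENSG00000141510_TP53', 'ENSG00000012048_BRCA1']
_toy_symbols = genenames_from10x(_toy_index)  # -> ['TP53', 'BRCA1']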
def genenames_from10x_mod(genelist):
"""Return gene names from 10x index generated with read_10x """
genesymbol=[]
#ensemblid=[]
for i in range(len(genelist)):
curgene=genelist[i]
starts=[]
for x in re.finditer('_',curgene):
starts.append(x.start()+1)
genesymbol.append(curgene[starts[0]:])
return genesymbol#,ensemblid
def collapse2gene(DGE):
DGE_gene=DGE.copy()
DGE_gene.index=genenames_from10x(DGE_gene.index)
DGE_gene=DGE_gene.groupby(DGE_gene.index).sum()
return DGE_gene
def guide2gene(guide):
"""get genename between underscores"""
underscore_pos = []
count=0
if ('INTERGENIC' in guide):
nameout='INTERGENIC'
elif ('_' in guide):
for x in re.finditer('_',guide):
if count<2:
underscore_pos.append(x.span()[1])
nameout=re.sub('sg','',guide[underscore_pos[0]:underscore_pos[1]-1])
else:
nameout=guide
return nameout
def get_batches(cbcs):
"""Return batch - last underscore in column names"""
batchvec=[]
for cell in cbcs:
starts=[]
for x in re.finditer('_',cell):
starts.append(x.start()+1)
batchvec.append(cell[starts[-2]:])
return np.array(batchvec)
def genelevel_dict(GUIDES_DICT):
"""Collapse guide level dictionary to gene level using guide2gene"""
genes=[guide2gene(x) for x in GUIDES_DICT.keys()]
GUIDES_DICT_GENES={}
for gene in genes:
GUIDES_DICT_GENES[gene]=[]
for key in GUIDES_DICT.keys():
GUIDES_DICT_GENES[guide2gene(key)].extend(GUIDES_DICT[key])
return GUIDES_DICT_GENES
####transform data#######
def tp10k_transform(DGE,norm_factor=1.0e4):
"""normalize columns of pandas dataframe to sum to a constant, by default 10,000"""
return(norm_factor*(DGE / DGE.sum()))
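# Minimal sketch of tp10k_transform on a toy genes x cells count matrix (values illustrative):
# every cell (column) is rescaled to sum to 10,000; a log transform is a common follow-up.
_toy_counts = pd.DataFrame({'cell1': [1, 0, 3], 'cell2': [2, 2, 0]}, index=['geneA', 'geneB', 'geneC'])
_toy_tp10k = tp10k_transform(_toy_counts)
_toy_log = np.log2(1.0 + _toy_tp10k)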
def Zcells(DGE):
"""Z transformation of columns of pandas"""
DGEZ=DGE.copy()
DGEZ=pd.DataFrame(sklearn.preprocessing.scale(DGE,axis=0))
DGEZ.index=DGE.index
DGEZ.columns=DGE.columns
return DGEZ
def Zgenes(DGE,batchvec=None):
"""Z transformation of rows of pandas, option for per batch normalization"""
DGEZ=DGE.copy()
if batchvec is None:
DGEZ=pd.DataFrame(sklearn.preprocessing.scale(DGEZ,axis=1))
DGEZ.columns=DGE.columns
DGEZ.index=DGE.index
else:
batch=np.unique(batchvec)
for curbatch in batch:
DGEZ.ix[:,np.array(batchvec)==curbatch]=sklearn.preprocessing.scale(DGEZ.ix[:,np.array(batchvec)==curbatch],axis=1)
return DGEZ
def Zgenes_floor(DGE,floor=0,batchvec=None):
"""Z transformation of rows of pandas dataframe, with flooring of std dev, option for per batch normalization"""
DGEZ=DGE.copy()
if batchvec is None:
curstd=DGE.std(axis=1)+floor
curmean=DGE.mean(axis=1)
curZ=(DGEZ.subtract(curmean,axis=0)).divide(curstd,axis=0)
DGEZ=curZ
DGEZ.columns=DGE.columns
DGEZ.index=DGE.index
else:
batch=np.unique(batchvec)
for curbatch in batch:
curDGE=DGEZ.ix[:,np.array(batchvec)==curbatch]
curstd=curDGE.std(axis=1)+floor
curmean=curDGE.mean(axis=1)
curZ=(curDGE.subtract(curmean,axis=0)).divide(curstd,axis=0)
DGEZ.ix[:,np.array(batchvec)==curbatch]=np.array(curZ)
return DGEZ
def Centergenes(DGE,batchvec=None):
"""Median centering of rows of pandas, option for per batch normalization"""
DGEC=DGE.copy()
if batchvec is None:
DGEC=DGEC.subtract(DGEC.median(axis=1),axis='rows')
else:
batch=np.unique(batchvec)
for curbatch in batch:
DGEC.ix[:,np.array(batchvec)==curbatch]=DGEC.ix[:,np.array(batchvec)==curbatch].subtract(DGEC.ix[:,np.array(batchvec)==curbatch].median(axis=1),axis='rows')
return DGEC
def permute_matrix(DGE,bins=20,verbose=0):
"""Permute genes based on similar expression levels"""
DGE_perm=DGE.copy()
GSUMS=np.sum(DGE,axis=1)
breakvec = np.linspace(1,100,bins)
breaks=[]
for breaker in breakvec:
breaks.append(np.percentile(GSUMS,breaker))
breaks=np.unique(breaks)
for i in range(len(breaks)-1):
if verbose==1:
print(np.round((1.0*i)/(len(breaks)-1)))
for j in range(len(DGE.columns)):
curlogical=np.logical_and(GSUMS>breaks[i],GSUMS<=breaks[i+1])
DGE_perm.ix[curlogical,j]=np.random.permutation(DGE_perm.ix[curlogical,j])
return DGE_perm
def downsample_reads(DF,per_reads=1.0,nrpc=None):
DF_mod=DF.copy()
numgenes=np.shape(DF_mod)[0]
genenames=DF_mod.index
DF_mod.index=range(numgenes)
cells=DF_mod.columns
readspercell=np.sum(DF_mod,axis=0)
totalreads =np.sum(readspercell)
newreads =np.round(totalreads*per_reads)
cellpercents=np.divide(1.0*readspercell,totalreads)
if nrpc:
newreadspercell=nrpc
else:
newreadspercell=[int(x) for x in np.round(np.multiply(cellpercents,newreads))]
DF_out=pd.DataFrame()
for i in range(len(cells)):
vectorize=[]
curcell=DF_mod[cells[i]]
curcell=curcell[curcell!=0]
for j in curcell.index:
vectorize.extend([j]*curcell[j])
vec_sample=np.random.choice(vectorize,size=newreadspercell[i],replace=False)
sampled_vec=np.histogram(vec_sample,bins=range(numgenes+1))[0]
DF_out[cells[i]]=sampled_vec
DF_out.index=genenames
return DF_out
def downsampler(DF,percell=1.0,perreads=1.0):
if percell==1.0:
DF_sampled=DF.copy()
else:
newcells=int(np.round(np.shape(DF)[1]*percell))
DF_sampled=DF.sample(newcells,axis=1)
if perreads==1.0:
return DF_sampled
else:
return downsample_reads(DF_sampled,perreads)
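# Minimal sketch of downsampler on a toy genes x cells count matrix (sizes/values illustrative):
# keep 50% of the cells, then 50% of the remaining reads, sampling reads without replacement.
_toy_dge = pd.DataFrame(np.random.poisson(5, size=(100, 10)),
                        index=['g%d' % i for i in range(100)],
                        columns=['cell%d' % j for j in range(10)])
_toy_downsampled = downsampler(_toy_dge, percell=0.5, perreads=0.5)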
###########generate covariates#########
def dict2X(GUIDES_DICT,cbcs):
"""convert guide cbc dictionary into covariate matrix"""
X=pd.DataFrame()
for key in GUIDES_DICT.keys():
curkey=[]
for cbc in cbcs:
if cbc in GUIDES_DICT[key]:
curkey.append(1)
else:
curkey.append(0)
X[key]=np.array(curkey)
X.index=cbcs
return X
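# Minimal sketch of dict2X (toy guide -> cell-barcode dictionary; all names illustrative):
# the result is a cells x guides 0/1 indicator matrix aligned to the supplied barcode order.
_toy_guides = {'sgGENE1_1': ['AAAC_1', 'AAAG_1'], 'sgGENE2_1': ['AAAG_1', 'AACC_1']}
_toy_cbcs = ['AAAC_1', 'AAAG_1', 'AACC_1', 'AACG_1']
_toy_X = dict2X(_toy_guides, _toy_cbcs)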
def clusters2X(clusters,cbcs):
"""convert cell cluster cbc dictionary into covariate matrix"""
clusterun=clusters.columns
X=pd.DataFrame(np.zeros((len(cbcs),len(clusterun))))
X.index=cbcs
clusters_intersect=clusters.loc[list(set(clusters.index).intersection(set(cbcs)))]
X.loc[clusters_intersect.index]=clusters_intersect
return X
def Xguides2genes(DF):
Xgene=DF.copy()
Xgene=Xgene.T
Xgene.index=[guide2gene(x) for x in Xgene.index]
Xgene_group=(Xgene.groupby(Xgene.index).sum()>0).sum()
XgeneF=1.0*(Xgene.groupby(Xgene.index).sum()>0).T
return XgeneF
def Y2FlatCov(Y,verbose=0):
ngenes=np.shape(Y)[0]
triuind=np.triu_indices(ngenes)
curnames=Y.index
covgenes=[curnames[x]+'-'+curnames[y] for x,y in zip(triuind[0],triuind[1])]
triu_mask=np.triu(np.ones((ngenes,ngenes))).astype(np.bool)
ncells=np.shape(Y)[1]
i=0
COVout=pd.DataFrame(np.zeros((len(triuind[0]),ncells)))
COVout.columns=Y.columns
for col in Y:
update_progress(np.divide(1.0*i,ncells))
cell=pd.DataFrame(Y[col])
#cell=np.divide(cell,np.linalg.norm(cell))
cellcov=cell.dot(cell.T)
triucellcov=cellcov.where(np.triu(np.ones(cellcov.shape)).astype(np.bool)).values.flatten()
triucellcov=triucellcov[~numpy.isnan(triucellcov)]
COVout[col]=triucellcov
i+=1
COVout.index=covgenes
return COVout
def create_interactions(DF):
"""Take covariate matrix and generate pairwise interaction matrix between covariates"""
INTERACT=pd.DataFrame()
dfcolumns=DF.columns
groupthese=[]
for i in range(len(dfcolumns)):
for j in range(len(dfcolumns)):
name1=dfcolumns[i]
name2=dfcolumns[j]
if(i<j):
twonames=np.sort(list(set([str(name1),str(name2)])))
if len(twonames)==2:
INTERACT[str(name1)+'-'+str(name2)]=np.array(DF.ix[:,i])*np.array(DF.ix[:,j])
groupthese.append(str(twonames[0])+'-'+str(twonames[1]))
#INTERACT.columns=[guide2gene(x.split('-')[0])+'-'+guide2gene(x.split('-')[1]) for x in INTERACT.columns]
INTERACT=INTERACT.T
INTERACT['genes']=INTERACT.index
INTERACT=INTERACT.groupby(groupthese).sum().T
INTERACT=INTERACT>0
INTERACT.index=DF.index
return(1.0*INTERACT)
def create_interactions_nothresh(DF):
"""Take covariate matrix and generate pairwise interaction matrix between covariates"""
INTERACT=pd.DataFrame()
dfcolumns=DF.columns
groupthese=[]
for i in range(len(dfcolumns)):
for j in range(len(dfcolumns)):
name1=dfcolumns[i]
name2=dfcolumns[j]
if(i<j):
twonames=np.sort(list(set([str(name1),str(name2)])))
if len(twonames)==2:
INTERACT[str(name1)+'-'+str(name2)]=np.array(DF.ix[:,i])*np.array(DF.ix[:,j])
groupthese.append(str(twonames[0])+'-'+str(twonames[1]))
#INTERACT.columns=[guide2gene(x.split('-')[0])+'-'+guide2gene(x.split('-')[1]) for x in INTERACT.columns]
INTERACT=INTERACT.T
INTERACT=INTERACT.groupby(groupthese).sum().T
INTERACT.index=DF.index
return(1.0*INTERACT)
def create_3_interactions(DF):
"""Take covariate matrix and generate three-way interaction matrix between covariates"""
INTERACT=pd.DataFrame()
dfcolumns=DF.columns
groupthese=[]
for i in range(len(dfcolumns)):
for j in range(len(dfcolumns)):
for k in range(len(dfcolumns)):
if((i<j)&(i<k)):
name1=dfcolumns[i]
name2=dfcolumns[j]
name3=dfcolumns[k]
threenames=np.sort(list(set([str(name1),str(name2),str(name3)])))
if len(threenames)==3:
INTERACT[str(name1)+'-'+str(name2)+'-'+str(name3)]=np.array(DF.ix[:,i])*np.array(DF.ix[:,j])*np.array(DF.ix[:,k])
groupthese.append(str(threenames[0])+'-'+str(threenames[1])+'-'+str(threenames[2]))
#INTERACT.columns=[guide2gene(x.split('-')[0])+'-'+guide2gene(x.split('-')[1])+'-'+guide2gene(x.split('-')[2]) for x in INTERACT.columns]
INTERACT=INTERACT.T
INTERACT['genes']=INTERACT.index
INTERACT=INTERACT.groupby(groupthese).sum().T
INTERACT=INTERACT>0
INTERACT.index=DF.index
return(1.0*INTERACT)
def create_3_interactions_nothresh(DF):
"""Take covariate matrix and generate three-way interaction matrix between covariates"""
INTERACT=pd.DataFrame()
dfcolumns=DF.columns
groupthese=[]
for i in range(len(dfcolumns)):
for j in range(len(dfcolumns)):
for k in range(len(dfcolumns)):
if((i<j)&(i<k)):
name1=dfcolumns[i]
name2=dfcolumns[j]
name3=dfcolumns[k]
threenames=np.sort(list(set([str(name1),str(name2),str(name3)])))
if len(threenames)==3:
INTERACT[str(name1)+'-'+str(name2)+'-'+str(name3)]=np.array(DF.ix[:,i])*np.array(DF.ix[:,j])*np.array(DF.ix[:,k])
groupthese.append(str(threenames[0])+'-'+str(threenames[1])+'-'+str(threenames[2]))
#INTERACT.columns=[guide2gene(x.split('-')[0])+'-'+guide2gene(x.split('-')[1])+'-'+guide2gene(x.split('-')[2]) for x in INTERACT.columns]
INTERACT=INTERACT.T
INTERACT['genes']=INTERACT.index
INTERACT=INTERACT.groupby(groupthese).sum().T
INTERACT.index=DF.index
return(1.0*INTERACT)
#############Linear Model Stuff########
def cv_rsq(Y,X,k=5,per=0.8,adj=[],relcel=[]):
Y_tmp=Y.copy()
X_tmp=X.copy()
rsq=[]
for i in range(k):
print(i)
numsamples=int(np.round(per*len(Y_tmp)))
train=np.random.choice(range(len(Y_tmp)),size=numsamples,replace=False)
traincells=Y_tmp.index[train]
testcells=list(set(Y_tmp.index)-set(traincells))
print('1',len(testcells))
if len(relcel)>0:
testcells=list(set(testcells).intersection(set(relcel)))
print('2',len(testcells))
Y_train=Y_tmp.loc[traincells]
Y_test=Y_tmp.loc[testcells]
flag=0
X_train=X_tmp.loc[traincells]
X_test=X_tmp.loc[testcells]
lmfit=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0005,max_iter=10000)#linear_model.Ridge
lmfit.fit(X_train,Y_train)
if len(adj)>0:
X_train_adj=bayes_cov_col(Y_train,X_train,adj,lmfit)
lmfit.fit(X_train_adj,Y_train)
X_test_adj=bayes_cov_col(Y_test,X_test,adj,lmfit)
rsq.append(lmfit.score(X_test_adj,Y_test))
else:
rsq.append(lmfit.score(X_test,Y_test))
return rsq
def marginal_covariates(y,x,k=4,percent=0.8):
"""Input is observations and list of covariates
like guides, qc, batch, guide interactions, cell types, cell type interactions
perform k-fold CV on xx percent of data
for each of the 2^n combinations of covariates
"""
if isinstance(x,list):
numsamples=int(np.round(percent*len(y)))
X=pd.concat(x,axis=1)
# rsqall=[]
# for i in range(k):
# print(i)
# train=np.random.choice(range(len(y)),size=numsamples,replace=False)
# traincells=y.index[train]
# testcells=list(set(y.index)-set(traincells))
# X_train=X.loc[traincells]
# Y_train=y.loc[traincells]
# X_test=X.loc[testcells]
# Y_test=y.loc[testcells]
# enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0012,max_iter=10000)
# enet.fit(X_train,Y_train)
# print('model has been fit')
# rsqall.append(enet.score(X_test,Y_test))
rsqind=[]
big_resid=[]
for j in range(len(x)):
print(j)
rsqk=[]
for i in range(k):
print(k)
train=np.random.choice(range(len(y)),size=numsamples,replace=False)
traincells=y.index[train]
testcells=list(set(y.index)-set(traincells))
Y_train=y.loc[traincells]
Y_test=y.loc[testcells]
flag=0
if j==0:
X_train=x[j].loc[traincells]
X_test=x[j].loc[testcells]
lmfit=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0005,max_iter=10000)
else:
X=pd.concat(x[0:j],axis=1)
X_train=X.loc[traincells]
X_test=X.loc[testcells]
lmfit.fit(X_train,Y_train)
rsqk.append(lmfit.score(X_test,Y_test))
Yhat=lmfit.predict(X_test)
if flag==0:
df_resid=Yhat-Y_test
flag=1
else:
df_resid = (df_resid + (Yhat-Y_test)) / 2.0
rsqind.append(rsqk)
big_resid.append(df_resid)
else:
print('x is not a list')
return
#df_rsq=pd.concat([pd.DataFrame(rsqind)],axis=0)
return rsqind
def crosscov_interactions(X1,X2):
cols1=X1.columns
cols2=X2.columns
Xout=pd.DataFrame()
for i in range(len(cols1)):
for j in range(len(cols2)):
if i>j:
curi=cols1[i]
curj=cols2[j]
Xout[str(curi)+'_'+str(curj)]=X1[curi]*X2[curj]
return Xout
def nonzeroX2dict(X):
dict_out={}
for col in X.columns:
curcol=X[col]
dict_out[col]=curcol[curcol>0].index
return dict_out
def bayes_cov_col(Y,X,cols,lm):
"""
@Y = Expression matrix, cells x x genes, expecting pandas dataframe
@X = Covariate matrix, cells x covariates, expecting pandas dataframe
@cols = The subset of columns that the EM should be performed over, expecting list
@lm = linear model object
"""
#EM iterateit
Yhat=pd.DataFrame(lm.predict(X))
Yhat.index=Y.index
Yhat.columns=Y.columns
SSE_all=np.square(Y.subtract(Yhat))
X_adjust=X.copy()
df_SSE = []
df_logit = []
for curcov in cols:
curcells=X[X[curcov]>0].index
if len(curcells)>2:
X_notcur=X.copy()
X_notcur[curcov]=[0]*len(X_notcur)
X_sub=X_notcur.loc[curcells]
Y_sub=Y.loc[curcells]
GENE_var=2.0*Y_sub.var(axis=0)
vargenes=GENE_var[GENE_var>0].index
Yhat_notcur=pd.DataFrame(lm.predict(X_sub))
Yhat_notcur.index=Y_sub.index
Yhat_notcur.columns=Y_sub.columns
SSE_notcur=np.square(Y_sub.subtract(Yhat_notcur))
SSE=SSE_all.loc[curcells].subtract(SSE_notcur)
SSE_sum=SSE.sum(axis=1)
SSE_transform=SSE.div(GENE_var+0.5)[vargenes].sum(axis=1)
logitify=np.divide(1.0,1.0+np.exp(SSE_transform))#sum))
df_SSE.append(SSE_sum)
df_logit.append(logitify)
X_adjust[curcov].loc[curcells]=logitify
return X_adjust
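# Minimal end-to-end sketch of bayes_cov_col on random data (all sizes and names illustrative):
# fit a first model, soft-assign the listed perturbation columns, then refit on the adjusted design.
from sklearn.linear_model import ElasticNet
_cells = ['c%d' % i for i in range(50)]
_Y0 = pd.DataFrame(np.random.randn(50, 20), index=_cells)
_X0 = pd.DataFrame(np.random.binomial(1, 0.3, size=(50, 4)), index=_cells, columns=['g1', 'g2', 'g3', 'g4'])
_lm0 = ElasticNet(precompute=True, l1_ratio=0.5, alpha=0.005, max_iter=10000)
_lm0.fit(_X0, _Y0)
_X0_adjusted = bayes_cov_col(_Y0, _X0, ['g1', 'g2'], _lm0)  # entries shrink toward 0 for weakly perturbed cells
_lm0.fit(_X0_adjusted, _Y0)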
def run_model(Y,X,EM_DICT=None,verbose=0,modalpha=0.0005,removecells=1):
"""
@Y = Expression matrix, cellx x genes, expecting pandas dataframe
@X = Covariate matrix, cells x covariates, expecting pandas dataframe
@EM_DICT = A dictionary of cell labels for each perturbation to perform the EM-step over, expecting dict
"""
enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=modalpha,max_iter=10000)
enet.fit(X,Y)
if verbose==1:
print(enet.score(X,Y))
Be=pd.DataFrame(enet.coef_)
Be.columns=X.columns
Be.index=Y.columns
#EM iterateit
Yhat=pd.DataFrame(enet.predict(X))
Yhat.index=Y.index
Yhat.columns=Y.columns
SSE_all=np.square(Y.subtract(Yhat))
X_adjust=X.copy()
X_adjust['unperturbed']=[0]*len(X)
df_SSE = []
df_logit = []
df_pf = []
if EM_DICT is not None:
for curcov in EM_DICT.keys():
curcells=EM_DICT[curcov]
X_notcur=X.copy()
X_notcur[curcov]=[0]*len(X_notcur)
X_sub=X_notcur.loc[curcells]
Y_sub=Y.loc[curcells]
GENE_var=2.0*Y_sub.var(axis=0)
vargenes=GENE_var[GENE_var>0].index
Yhat_notcur=pd.DataFrame(enet.predict(X_sub))
Yhat_notcur.index=Y_sub.index
Yhat_notcur.columns=Y_sub.columns
SSE_notcur=np.square(Y_sub.subtract(Yhat_notcur))
SSE=SSE_all.loc[curcells].subtract(SSE_notcur)
SSE_sum=SSE.sum(axis=1)
SSE_transform=SSE.div(GENE_var+0.5)[vargenes].sum(axis=1)
logitify=np.divide(1.0,1.0+np.exp(SSE_sum))#SSE_transform))#sum))
df_SSE.append(SSE_sum)
df_logit.append(logitify)
pf=np.mean(logitify>0.99)
if verbose==1:
print(curcov,pf)
df_pf.append([curcov,pf])
weak_perturb=1.0*(logitify<0.1)
X_adjust[curcov].loc[curcells]=logitify
X_adjust['unperturbed'].loc[curcells]=weak_perturb
print('done with EM')
#refit model
enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0005,max_iter=10000)
if removecells==1:
goodcells=X_adjust['unperturbed']!=1
print(np.mean(goodcells))
Y=Y[goodcells]
X_adjust=X[goodcells]
enet.fit(X_adjust,Y)
Yhat=pd.DataFrame(enet.predict(X_adjust))
Yhat.index=Y.index
Yhat.columns=Y.columns
if verbose==1:
print(enet.score(X_adjust,Y))
Be=pd.DataFrame(enet.coef_)
Be.columns=X_adjust.columns
Be.index=Y.columns
RES_out=Y.subtract(Yhat)
if EM_DICT is not None:
return(Be,X_adjust,RES_out,df_pf)#,df_SSE,df_logit)
return(Be,X_adjust,RES_out)#,df_SSE,df_logit)
def run_model_bycol(Y,X,EM_cols=None,modalpha=0.005,verbose=0):
"""
@Y = Expression matrix, expecting pandas dataframe, cells x genes
@X = Covariate matrix, expecting pandas dataframe, cells x covariates
@EM_cols = The subset of columns that the EM should be performed over, list
"""
enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=modalpha,max_iter=10000)
enet.fit(X,Y)
if verbose==1:
print(enet.score(X,Y))
Be=pd.DataFrame(enet.coef_)
Be.columns=X.columns
Be.index=Y.columns
Yhat=pd.DataFrame(enet.predict(X))
Yhat.index=Y.index
Yhat.columns=Y.columns
if EM_cols is not None:
X_adjust=bayes_cov_col(Y,X,EM_cols,enet)
#print('done with EM')
#refit model
enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0004,max_iter=10000)
enet.fit(X_adjust,Y)
Yhat=pd.DataFrame(enet.predict(X_adjust))
Yhat.index=Y.index
Yhat.columns=Y.columns
if verbose==1:
print(enet.score(X_adjust,Y))
Be=pd.DataFrame(enet.coef_)
Be.columns=X_adjust.columns
Be.index=Y.columns
else:
X_adjust=X.copy()
RES_out=Y.subtract(Yhat)
return(Be,X_adjust,RES_out)
def count_27(B1,B2,B3,thresh=0.01):
vecs1=[B1<(-thresh),np.abs(B1)<=thresh,B1>thresh]
vecs2=[B2<(-thresh),np.abs(B2)<=thresh,B2>thresh]
vecs3=[B3<(-thresh),np.abs(B3)<=thresh,B3>thresh]
COUNTER=[]
for i in range(3):
for j in range(3):
for k in range(3):
COUNTER.append(np.sum(np.logical_and(np.logical_and(vecs1[i],vecs2[j]),vecs3[k])))
return COUNTER
def return_sorted_list(in1):
output = [0] * len(in1)
for i, x in enumerate(sorted(range(len(in1)), key=lambda y: in1[y])):
output[x] = i
return np.array(output)
def index_27(B1,B2,B3,df_order,thresh=0.01):
vecs1=[B1<(-thresh),np.abs(B1)<=thresh,B1>thresh]
vecs2=[B2<(-thresh),np.abs(B2)<=thresh,B2>thresh]
vecs3=[B3<(-thresh),np.abs(B3)<=thresh,B3>thresh]
Ball=pd.concat([B1,B2,B3],axis=1)
iarray=pd.DataFrame(['none']*len(B1))
iarray.index=B1.index
for i in range(3):
for j in range(3):
for k in range(3):
totsum=int(np.sum(np.logical_and(np.logical_and(vecs1[i],vecs2[j]),vecs3[k])))
iarray[np.logical_and(np.logical_and(vecs1[i],vecs2[j]),vecs3[k])]=str(i-1)+' '+str(j-1)+' '+str(k-1)
iarray['type']=['none']*len(B1)
iarray['order']=[0]*len(B1)
iarray['effect']=[0]*len(B1)
numbering=0
for i in range(len(df_order)):
curgroup=df_order.index[i]
curtype=df_order.ix[i,'type']
matches=iarray[0]==curgroup
nummatches=np.sum(matches)
if nummatches>0:
Bmatch=Ball[matches]
intarray=[int(x) for x in curgroup.split(' ')]
Bmod=Bmatch.copy()
l=0
for col in Bmod.columns:
Bmod[col]=intarray[l]*Bmod[col]
l+=1
Bsum=pd.DataFrame(Bmod.sum(axis=1))
ordervec=return_sorted_list(-np.array(Bsum[0]))
ordervec=ordervec+numbering
iarray.ix[matches,'type']=curtype
iarray.ix[matches,'order']=ordervec
iarray.ix[matches,'effect']=np.array(Bsum[0])
numbering+=np.max(ordervec)+1
return iarray
def hyper_overlap(genes1,genes2,M):
curoverlap=genes1.intersection(genes2)
x=len(curoverlap)
n=len(genes1)
N=len(genes2)
pval=1.0-scipy.stats.hypergeom.cdf(x,M, n, N)
return pval
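# Tiny sketch of hyper_overlap (toy gene sets; the universe size M is illustrative):
# it returns the upper-tail hypergeometric p-value for the overlap of the two sets.
_genes_a = set(['g1', 'g2', 'g3', 'g4'])
_genes_b = set(['g2', 'g3', 'g5'])
_p_overlap = hyper_overlap(_genes_a, _genes_b, 20000)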
def hyper_category(df_cats,genes_in):
pvals=[]
cat_un=np.unique(df_cats[0])
genes2=set(genes_in).intersection(set(df_cats.index))
for cat in cat_un:
genes1=set(df_cats[df_cats[0]==cat].index)
pvals.append(hyper_overlap(genes1,genes2,len(df_cats)))
df_pvals=pd.DataFrame(-np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(pvals)[1]))
df_pvals.index=cat_un
return df_pvals
def numbins(x):
iqr=((np.percentile(x, 75) - np.percentile(x, 25)))
if iqr==0.0:
return int(np.ceil(np.sqrt(len(x))))
else:
bins=int(np.ceil((np.max(x)-np.min(x))/((iqr)/np.power(len(x),0.33333333))))
return bins
def get_1sidepval(B,joint,edges,gsums,gvar,nguides):
Bpval=B.copy()
#create index lookup for each gene to the pairs
genevec=np.array(range(len(gsums)))
guidevec=np.array(range(len(nguides)))
gsums=np.array(gsums)
gvar=np.array(gvar)
nguides=np.array(nguides)
rowindex_dict={}
colindex_dict={}
for i in range(len(edges[0])-1):
for j in range(len(edges[1])-1):
logical_gsums=np.logical_and(gsums>=edges[0][i],gsums<edges[0][i+1])
logical_gvar=np.logical_and(gvar>=edges[1][j],gvar<edges[1][j+1])
logical_both=np.logical_and(logical_gsums,logical_gvar)
if np.sum(logical_both)>0:
rowindex_dict[(i,j)]=genevec[logical_both]
for i in range(len(edges[2])-1):
logical_nguides=np.logical_and(nguides>=edges[2][i],nguides<edges[2][i+1])
if np.sum(logical_nguides)>0:
colindex_dict[i]=guidevec[logical_nguides]
maxedges=len(edges[3])-2
for key in rowindex_dict.keys():
for guidekey in colindex_dict.keys():
curjoint=joint[key[0]][key[1]][guidekey]
curjoint /= curjoint.sum()
curjoint=pd.DataFrame(curjoint)
curjoint.index=edges[3][:-1]
curjoint=curjoint.cumsum()
curmat=Bpval.ix[rowindex_dict[key],colindex_dict[guidekey]]
lookup_mat=curmat.copy()
bp=pd.DataFrame(np.searchsorted(curjoint.index,curmat))
bpmax=bp>maxedges
bp[bpmax]=0
for i in range(np.shape(bp)[1]):
lookup=1.0-np.round(np.array(curjoint)[bp.ix[:,i]],10)
lookup_mat.ix[:,i]=lookup
lookup_mat.ix[np.where(bpmax)]=0
Bpval.ix[rowindex_dict[key],colindex_dict[guidekey]]=lookup_mat
Bpval[B<=0]=1.0
return Bpval
#create permuted coefficient matrix
def shuffle_mat(X,Xother,Y):
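    """Permute the rows of X, realign to Y's index, and concatenate the unshuffled covariates Xother."""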
flag=0
X_shuffle=X.copy()
X_shuffle.index=np.random.permutation(X.index)
X_shuffle=X_shuffle.loc[Y.index]
X3_shuffle=pd.concat([X_shuffle,Xother],axis=1)
return X3_shuffle
def make_simple_shufs(X,Xother,Y,modalpha=0.005,shufnum=3):
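    """Build an empirical null by refitting the elastic net (run_model) on shufnum row-permuted versions of X (concatenated with Xother) and stacking the resulting coefficient matrices."""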
Be_shuffs=pd.DataFrame()
flag=0
for i in range(shufnum):
print(i)
X3_shuffle=shuffle_mat(X,Xother,Y)
Be_shuf,X_adjust,RES=run_model(Y,X3_shuffle,modalpha=modalpha,verbose=0)
if flag==0:
Be_shuffs=Be_shuf
flag=1
else:
Be_shuffs=pd.concat([Be_shuffs,Be_shuf])
return Be_shuffs
def make_shufs(X,Xother,Y,shufnum=3,modalpha=0.005,verbose=1):
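    """Build an empirical null for the EM-adjusted model by refitting run_model_bycol on shufnum row-permuted versions of X (concatenated with Xother) and stacking the resulting coefficient matrices."""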
Be_shuffs=pd.DataFrame()
flag=0
for i in range(shufnum):
if verbose==1:
print(i)
X3_shuffle=shuffle_mat(X,Xother,Y)
Be_shuf,X_adjust,RES=run_model_bycol(Y,X3_shuffle,modalpha=modalpha,EM_cols=X.columns,verbose=0)
if flag==0:
Be_shuffs=Be_shuf
flag=1
else:
Be_shuffs=pd.concat([Be_shuffs,Be_shuf])
return Be_shuffs
def make_shufs_linear_sub(X,Xother,Y,shufnum=3):
Be_shuffs=pd.DataFrame()
flag=0
for i in range(shufnum):
X3_shuffle=shuffle_mat(X,Xother,Y)
from sklearn import linear_model
lm=linear_model.Ridge(fit_intercept=True,max_iter=10000)
lm.fit(X3_shuffle,Y)
Be_shuf=pd.DataFrame(lm.coef_)
Be_shuf.index=Y.columns
Be_shuf.columns=X3_shuffle.columns
if flag==0:
Be_shuffs=Be_shuf
flag=1
else:
Be_shuffs=pd.concat([Be_shuffs,Be_shuf])
return Be_shuffs
def make_shufs_linear(X,Y,shufnum=3):
Be_shuffs=pd.DataFrame()
flag=0
for i in range(shufnum):
X_shuffle=X.copy()
X_shuffle.index=np.random.permutation(X.index)
X_shuffle=X_shuffle.loc[Y.index]
from sklearn import linear_model
lm=linear_model.Ridge(fit_intercept=True,max_iter=10000)
lm.fit(X_shuffle,Y)
Be_shuf=pd.DataFrame(lm.coef_)
Be_shuf.index=Y.columns
Be_shuf.columns=X_shuffle.columns
if flag==0:
Be_shuffs=Be_shuf
flag=1
else:
Be_shuffs=pd.concat([Be_shuffs,Be_shuf])
return Be_shuffs
#get FDR matrix
def fdr_coefs(B,B_shuf,gsums,gvar,nguides,mybins=[30,30,20,1000]):
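    """Convert coefficients in B to signed -log10 FDR values using 4-D binned empirical nulls (gene sum, gene variance, number of guides, |coefficient|) built separately from the positive and negative permuted coefficients in B_shuf; BH correction is applied per column."""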
numshufs=(1.0*len(B_shuf))/len(B)
if numshufs%1!=0:
        print('Error: the permuted coefficient matrix is not an integer multiple of the non-permuted matrix')
return
numshufs=int(numshufs)
gsums_rep=np.array([list(gsums)]*numshufs).flatten()
gvar_rep=np.array([list(gvar)]*numshufs).flatten()
nguides=np.array(nguides)
flag=0
for i in range(np.shape(B_shuf)[1]):
datas=pd.DataFrame([gsums_rep,gvar_rep,np.array([nguides[i]]*len(gsums_rep)),np.array(B_shuf.ix[:,i])]).T
if flag==0:
SHUFCOV=datas
flag=1
else:
SHUFCOV=pd.concat([SHUFCOV,datas])
numBins = mybins # number of bins in each dimension
SHUFPOS=SHUFCOV.copy()
SHUFPOS=SHUFPOS[SHUFPOS[3]>=0]
joint_pos, edges_pos = np.histogramdd(np.array(SHUFPOS), bins=numBins)
joint_pos /= joint_pos.sum()
SHUFNEG=SHUFCOV.copy()
SHUFNEG=SHUFNEG[SHUFNEG[3]<=0]
SHUFNEG[3]=SHUFNEG[3].abs()
joint_neg, edges_neg = np.histogramdd(np.array(SHUFNEG), bins=numBins)
joint_neg /= joint_neg.sum()
print('Created 4D Null Distributions')
B_sign = np.sign(B)
Bpos=B.copy()
Bpos[B<0]=0
Bneg=B.copy()
Bneg[B>0]=0
Bneg=Bneg.abs()
Bpval_pos=get_1sidepval(Bpos,joint_pos,edges_pos,gsums,gvar,nguides)
print('positive pvals calculated')
Bpval_neg=get_1sidepval(Bneg,joint_neg,edges_neg,gsums,gvar,nguides)
print('negative pvals calculated')
BFDR=Bpval_pos.copy()
BFDR[Bpval_neg<1]=Bpval_neg[Bpval_neg<1]
for col in BFDR.columns:
curcol=BFDR[col]
curcol_logical=curcol<1
BFDR.ix[curcol_logical,col]=-np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(curcol[curcol_logical])[1])
BFDR=np.multiply(B_sign,BFDR)
print('FDR correction performed')
return BFDR
#get FDR matrix
def fdr_colwise_coefs(B,B_shuf):
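    """Column-wise signed -log10 BH-FDR of coefficients against the empirical null (ECDF) of the permuted coefficients for that column."""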
BFDR=B.copy()
for col in BFDR.columns:
curcol=B[col]
curfdr=BFDR[col]
curecdf=ECDF(B_shuf[col])
curcol_pos=curcol>0
curcol_neg=curcol<0
sign_col=np.sign(curcol)
curfdr[curcol_pos]=-np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(1.0-curecdf(curcol[curcol_pos]))[1])
curfdr[curcol_neg]=np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(curecdf(curcol[curcol_neg]))[1])
BFDR[col]=curfdr
print('FDR correction performed')
return BFDR
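# Illustrative end-to-end sketch of the permutation-FDR workflow (variable names are
# assumptions: Y is a cells x genes expression matrix, X the perturbation design
# matrix, Xother additional covariates):
#   Be, X_adjust, RES = run_model_bycol(Y, pd.concat([X, Xother], axis=1),
#                                       modalpha=0.005, EM_cols=X.columns)
#   Be_shuffs = make_shufs(X, Xother, Y, shufnum=3, modalpha=0.005)
#   BFDR = fdr_colwise_coefs(Be, Be_shuffs)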
def pointwise_p_colwisefdr(B,Bshuf):
BFDR=B.copy()
for col in B.columns:
probs=[]
sign=[]
for ind in B.index:
curecdf=ECDF(Bshuf[col].ix[ind])
curval=B[col].ix[ind]
if curval>0:
sign.append(1)
probs.append(1.0-curecdf(B[col].ix[ind]))
else:
sign.append(-1)
probs.append(curecdf(B[col].ix[ind]))
probs=np.array(probs)
sign=np.array(sign)
BFDR[col]=sign*(-np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(probs)[1]))
return BFDR
def pointwise_p_rowwisefdr(B,Bshuf):
BFDR=B.copy()
SIGN=B.copy()
for col in B.columns:
probs=[]
sign=[]
for ind in B.index:
curecdf=ECDF(Bshuf[col].ix[ind])
curval=B[col].ix[ind]
if curval>0:
sign.append(1)
probs.append(1.0-curecdf(B[col].ix[ind]))
else:
sign.append(-1)
probs.append(curecdf(B[col].ix[ind]))
probs=np.array(probs)
sign=np.array(sign)
SIGN[col]=sign
BFDR[col]=probs
#rowwise FDR
for ind in B.index:
BFDR.ix[ind,:]=SIGN.ix[ind,:]*(-np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(BFDR.ix[ind,:])[1]))
return BFDR
def fregression_fdr(X,Y,B):
FDR=pd.DataFrame()
for i in Y.columns:
pvals=-np.log10(sklearn.feature_selection.f_regression(X, Y[i])[1])
FDR[i]=pvals
FDR.index=X.columns
FDR=FDR.T
FDR=np.sign(B)*FDR
return FDR
def compare_fdrs(BFDR,BFDR_down,thresh1=1.3,thresh2=1.3):
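    """For each column, compare thresholded signed significance calls between two FDR matrices (treating BFDR as reference) and report TP/TN/FP/FN, sensitivity and specificity."""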
COMPARE=pd.DataFrame()
for col in BFDR.columns:
col1=BFDR[col]
col2=BFDR_down[col]
a=np.sign(col1)*(col1.abs()>thresh1)
b=np.sign(col2)*(col2.abs()>thresh2)
CONF=sklearn.metrics.confusion_matrix(a,b,labels=[-1,0,1])
tp=CONF[0][0]+CONF[2][2]
fp=CONF[0][2]+CONF[2][0]+CONF[1][0]+CONF[1][2]
fn=CONF[0][1]+CONF[2][1]
tn=CONF[1][1]
        sensitivity=np.divide(1.0*tp,tp+fn)
        specificity=np.divide(1.0*tn,tn+fp)
        COMPARE[col]=[tp,tn,fp,fn,sensitivity,specificity]
COMPARE.index=['TP','TN','FP','FN','Sensitivity','Specificity']
return(COMPARE)
#############data filtering############
def fano_variable(DGEtpm,input_mean=None,meanthresh=0.5,resthresh=0.05,f=0.25,highlight_genes=None,plot=0):
#get mean and std for each gene
if input_mean is None:
popmean=np.log2(np.mean(DGEtpm,axis=1)+1)
else:
popmean=input_mean
popstd=np.std(np.log2(DGEtpm+1),axis=1)#np.divide(np.std(DGEtpm,axis=1),popmean)
thresh=meanthresh
x=popmean[np.array(popmean>thresh)]
y=popstd[np.array(popmean>thresh)]
DGE_fit=DGEtpm[np.array(popmean>thresh)]
#fit line
lowess = sm.nonparametric.lowess
lz_pred = lowess(y, x,frac=f,return_sorted=False)
residuals=y-lz_pred
if plot==1:
plt.scatter(x,y,c=['red' if z>resthresh else 'blue' for z in residuals])
plt.xlabel('log2(Population Mean)')
plt.ylabel('Standard Deviation')
df_res=pd.DataFrame()
df_res['residuals']=residuals
df_res['mean']=x
df_res['std']=y
df_res.index=DGE_fit.index
if highlight_genes:
if plot==1:
subset=df_res.loc[highlight_genes].dropna()
for thisgene in subset.index:
df_tmp=subset.loc[thisgene]
plt.text(df_tmp['mean'],df_tmp['std'],thisgene,fontsize=16)
return df_res
#return variable genes
##########PCA stuff#########################
def fb_pca(DGE,k=50):
if 'fbpca' in sys.modules:
[Ufb,Sfb,Vfb]=fbpca.pca(DGE,k)
else:
pca=sklearn.decomposition.PCA(n_components=k)
pca.fit(DGE)
Ufb=pca.fit_transform(DGE)
Sfb=pca.explained_variance_
Vfb=pca.components_
Vfb=pd.DataFrame(Vfb).T
Vfb.index=DGE.columns
Ufb=pd.DataFrame(Ufb)
Ufb.index=DGE.index
return Ufb,Sfb,Vfb
def project_ontoPC(U,DGE):
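    """Project each column of DGE onto the principal-component loadings in U and return the resulting PC coordinates."""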
DGE_t=DGE.copy().T
Vfb_new=pd.DataFrame()
#sscells=DGE_t.pow(2).sum(axis=1)
for i in range(np.shape(U)[1]):
Vfb_new[i]=DGE_t.dot(U[i])
Vfb_new=Vfb_new.T
Vfb_new=Vfb_new#/sscells
return Vfb_new.T
def columnwise_compare_innermax(U1,U2):
U_big=U1.copy()
U_big=U_big.merge(U2,left_index=True,right_index=True)
U_big.columns=range(len(U_big.columns))
genes1=set(U1.index)
genes2=set(U2.index)
jac=np.divide(1.0*len(genes1.intersection(genes2)),len(genes1.union(genes2)))
print('jaccard gene overlap =',jac)
cols1=np.shape(U1)[1]
cols2=np.shape(U2)[1]
mincols=np.min([cols1,cols2])
print(np.shape(U_big))
comparevec=[]
for i in range(mincols):
corrs=[]
for j in range(mincols):
corrs.append(np.abs(np.corrcoef(U_big.ix[:,j],U_big.ix[:,i+cols1])[0][1]))
comparevec.append(np.max(corrs))
return comparevec
def PC_noise(DGEZ,noiselevels=np.linspace(-2,2,20),reps=3,sig_pcs=40):
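    """Assess PC robustness by adding Gaussian noise of magnitude 10**noise to DGEZ, recomputing the PCs, and recording each original PC's best absolute correlation with the noisy PCs (averaged over reps)."""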
PC_cor=pd.DataFrame()
[Ufb,Sfb,Vfb]= fb_pca(DGEZ,k=sig_pcs)
for noise in noiselevels:
df_noise=pd.DataFrame()
for rep in range(reps):
DGE_Z_wnoise=DGEZ+np.random.normal(0,np.power(10.0,noise),np.shape(DGEZ))
[Ufb_noise,Sfb_noise,Vfb_noise]=fb_pca(DGE_Z_wnoise,k=sig_pcs)
comparevec=[]
for i in range(sig_pcs):
corrs=[]
for j in range(sig_pcs):
corrs.append(np.abs(np.corrcoef(Ufb.ix[:,j],Ufb_noise.ix[:,i])[0][1]))
comparevec.append(np.max(corrs))
df_noise[rep]=comparevec
PC_cor[noise]=df_noise.mean(axis=1)
return PC_cor
def jackstraw(DGEZ,per=0.005,sig_pcs=40,reps=100,verbose=0):
"""substitute small percentage of features with permuted versions, compare actual to permuted to obtain significance"""
ngenes=len(DGEZ)
[Ufb,Sfb,Vfb]= fb_pca(DGEZ,k=sig_pcs)
Ufb_null = pd.DataFrame()
flag=0
#repeatedly permute and recalculate null PC distributions
for i in range(reps):
if (verbose==1):
print('rep',i)
shuf_genes=np.random.choice(range(ngenes),size=int(np.ceil(ngenes*per)),replace=False)
DGEZ_perm=DGEZ.copy()
DGEZ_perm.ix[shuf_genes,:]=np.array(DGEZ_perm.ix[shuf_genes,np.random.permutation(range(np.shape(DGEZ)[1]))])
        [Ufb_perm,Sfb_perm,Vfb_perm]= fb_pca(DGEZ_perm,k=sig_pcs)
        tmp_null=Ufb_perm.ix[shuf_genes,:]
if flag==0:
Ufb_null=tmp_null
flag=1
else:
Ufb_null=pd.concat([Ufb_null,tmp_null])
PVALS=Ufb.copy()
for i in range(sig_pcs):
curecdf=ECDF(Ufb_null.ix[:,i])
curUfb=Ufb.ix[:,i]
isnegative=curUfb<0.0
ispositive=curUfb>=0.0
#statsmodels.sandbox.stats.multicomp.fdrcorrection0
PVALS.ix[isnegative,i]=np.log10(curecdf(Ufb.ix[isnegative,i]))
PVALS.ix[ispositive,i]=-np.log10(1-curecdf(Ufb.ix[ispositive,i]))
PVALS[PVALS>5]=5
PVALS[PVALS<(-5)]=-5
return PVALS
##########significance testing##############
def ttest_DGE(DGE1,DGE2,batch=None):
"""Ttest with batchwise comparison capability"""
FC=[]
PVALS=[]
A=DGE1.T
B=DGE2.T
if batch is None:
for gene in A:
difmean=np.mean(A[gene])-np.mean(B[gene])
ttest=scipy.stats.ttest_ind(A[gene],B[gene],equal_var=False)
FC.append(difmean)
PVALS.append(np.sign(difmean)*(-np.log10(ttest[1])))
else:
batchun=np.unique(batch[0])
for gene in A:
difmean=[]
pvals=[]
curA=A[gene]
curB=B[gene]
for curbatch in batchun:
curAbatch=curA[batch[0]==curbatch]
curBbatch=curB[batch[1]==curbatch]
curdiff=np.mean(curAbatch)-np.mean(curBbatch)
difmean.append(curdiff)
ttest=scipy.stats.ttest_ind(curAbatch,curBbatch,equal_var=False)
pvals.append(np.sign(curdiff)*(-np.log10(ttest[1])))
FC.append(np.mean(difmean))
PVALS.append(np.sum(pvals))
return np.array(FC),np.array(PVALS)
def within_across(B1,B2):
"""Compare correlations between two sets of regression coefficients"""
D1=B1.copy()
D2=B2.copy()
D1=D1.groupby(D1.index).mean()
D2=D2.groupby(D2.index).mean()
genesin=list(set(D1.index).intersection(set(D2.index)))
D1=D1.loc[genesin]
D2=D2.loc[genesin]
cor_list=[]
compare_list=[]
for col1 in D1.columns:
for col2 in D2.columns:
if col1 in D2.columns:
cur1=D1[col1]
cur2=D2[col2]
gene1=guide2gene(col1)
gene2=guide2gene(col2)
curcor=np.corrcoef(cur1,cur2)[0][1]
cor_list.append(curcor)
if col1==col2:
compare_list.append('same guide')
elif gene1==gene2:
compare_list.append('within')
else:
compare_list.append('across')
df_compare=pd.DataFrame(compare_list)
df_compare['correlations']=np.array(cor_list)
return df_compare
def within_without(B):
"""Compare correlations between perturbations targeting the same gene"""
COR=pd.DataFrame(np.corrcoef(B.T))
COR.index=B.columns
COR.columns=B.columns
count1=0
samesies=[]
difsies =[]
intsies = []
for guide1 in COR.columns:
count2=0
for guide2 in COR.columns:
if (count1>count2)&(('NTC' not in guide1)&('NTC' not in guide2))&(('INTERGENIC' not in guide1)&('INTERGENIC' not in guide2)):
underscore_pos = []
for x in re.finditer('_',guide1):
underscore_pos.append(x.span()[1])
thisgene1=guide1[underscore_pos[0]:underscore_pos[1]-1]
underscore_pos = []
for x in re.finditer('_',guide2):
underscore_pos.append(x.span()[1])
thisgene2=guide2[underscore_pos[0]:underscore_pos[1]-1]
if thisgene1==thisgene2:
samesies.append(COR[guide1][guide2])
else:
difsies.append(COR[guide1][guide2])
elif (count1>count2):
intsies.append(COR[guide1][guide2])
count2+=1
count1+=1
return np.array(samesies),np.array(difsies),np.array(intsies)
#####GO analysis################
def PCA2GO(DGEZ,sigpcs=15,thresh=2,repin=100,perin=0.005,fdr_thresh=0.1,species='human'):
#Jackstraw
PVALS=jackstraw(DGEZ,sig_pcs=sigpcs,reps=repin,per=perin)
print('done with jackstraw')
#go analysis
path2db='Path2obo/db/'
obodag = GODag(path2db+"go-basic.obo")
if (species=='human'):
geneid2gos = read_ncbi_gene2go(path2db+"gene2go", taxids=[9606])
print("{N:,} annotated genes".format(N=len(geneid2gos)))
these_genes = DGEZ.index
Xtable=pd.read_csv('Path2Xref/hg19_xref.txt',sep='\t')
Xtable.index=Xtable['Approved Symbol']
entrez=[int(x) for x in np.unique(Xtable.loc[these_genes].dropna()['EntrezGene ID'])]
elif(species=='mouse'):
geneid2gos = read_ncbi_gene2go(path2db+"gene2go", taxids=[10090])
print("{N:,} annotated genes".format(N=len(geneid2gos)))
these_genes = DGEZ.index
Xtable=pd.read_csv('Path2xref/biomart_xref.mm10.txt',sep='\t')
Xtable=Xtable[['Associated Gene Name','EntrezGene ID']].dropna()
Xtable.index=Xtable['Associated Gene Name']
entrez=[int(x) for x in np.unique(Xtable.loc[these_genes].dropna()['EntrezGene ID'])]
goeaobj = GOEnrichmentStudy(
        entrez, # background gene list (Entrez IDs)
geneid2gos, # geneid/GO associations
obodag, # Ontologies
propagate_counts = False,
alpha = 0.05, # default significance cut-off
        methods = ['fdr_bh']) # default multipletest correction method
df_bigGO=pd.DataFrame()
count=0
ngenes=len(PVALS)
for pc in PVALS.columns:
print(pc)
df_GO=pd.DataFrame()
curU=PVALS[pc]
meanx=np.mean(curU)
stdx=np.std(curU)
threshlow=meanx-thresh*stdx
threshhigh=meanx+thresh*stdx
lookup_entrez_high=[int(x) for x in np.unique(Xtable.loc[curU[curU>threshhigh].index].dropna()['EntrezGene ID'])]
lookup_entrez_low=[int(x) for x in np.unique(Xtable.loc[curU[curU<threshlow].index].dropna()['EntrezGene ID'])]
# 'p_' means "pvalue". 'fdr_bh' is the multipletest method we are currently using.
goea_results_high = goeaobj.run_study(lookup_entrez_high)
indexlist=[]
if len(lookup_entrez_high)>0:
for i in range(len(goea_results_high)):
if goea_results_high[i].p_fdr_bh<fdr_thresh:
df_GO[i]=[-np.log10(goea_results_high[i].p_fdr_bh)]
indexlist.append(goea_results_high[i].name)
if len(lookup_entrez_low)>0:
goea_results_low = goeaobj.run_study(lookup_entrez_low)
            for j in range(len(goea_results_low)):
if goea_results_low[j].p_fdr_bh<fdr_thresh:
df_GO[j+len(goea_results_high)+1]=[np.log10(goea_results_low[j].p_fdr_bh)]
indexlist.append(goea_results_low[j].name)
if(np.shape(df_GO)[0]==0):
df_GO[0]=[0]
df_GO.index=['NoGO']
else:
df_GO=df_GO.T
df_GO.index=indexlist
df_GO.columns=[pc]
df_GO=df_GO.groupby(df_GO.index).first()
if count==0:
df_bigGO=df_GO
count=1
else:
df_bigGO=df_bigGO.merge(df_GO,how='outer',left_index=True,right_index=True)
df_bigGO=df_bigGO.fillna(0)
if 'NoGO' in df_bigGO.index:
df_bigGO=df_bigGO.drop('NoGO')
return df_bigGO
def DE2GO(df_p,background,sig_thresh=3,num_genes=None,fdr_thresh=0.1,species='human'):
#go analysis
path2db='PATH2GOobofile/db/'
obodag = GODag(path2db+"go-basic.obo")
if (species=='human'):
geneid2gos = read_ncbi_gene2go(path2db+"gene2go", taxids=[9606])
print("{N:,} annotated genes".format(N=len(geneid2gos)))
these_genes = background
Xtable=pd.read_csv('PATH2hg19Xref/hg19_xref.txt',sep='\t')
Xtable.index=Xtable['Approved Symbol']
entrez=[int(x) for x in np.unique(Xtable.loc[these_genes].dropna()['EntrezGene ID'])]
elif(species=='mouse'):
geneid2gos = read_ncbi_gene2go(path2db+"gene2go", taxids=[10090])
print("{N:,} annotated genes".format(N=len(geneid2gos)))
these_genes = background
Xtable=pd.read_csv('PATH2mm10xref/biomart_xref.mm10.txt',sep='\t')
Xtable=Xtable[['Associated Gene Name','EntrezGene ID']].dropna()
Xtable.index=Xtable['Associated Gene Name']
entrez=[int(x) for x in np.unique(Xtable.loc[these_genes].dropna()['EntrezGene ID'])]
goeaobj = GOEnrichmentStudy(
        entrez, # background gene list (Entrez IDs)
geneid2gos, # geneid/GO associations
obodag, # Ontologies
propagate_counts = False,
alpha = 0.05, # default significance cut-off
        methods = ['fdr_bh']) # default multipletest correction method
df_bigGO=pd.DataFrame()
count=0
ngenes=len(df_p)
xtable_genes=set(Xtable.index)
for cluster in df_p.columns:
print(cluster)
df_GO=pd.DataFrame()
cur_cluster=df_p[cluster]
threshlow=(-sig_thresh)#np.percentile(cur_cluster,100.0*(1-np.divide(num_genes,ngenes)))
threshhigh=sig_thresh#np.percentile(cur_cluster,100.0*(np.divide(num_genes,ngenes)))
genes_high=cur_cluster[cur_cluster>threshhigh].index
genes_high=list(xtable_genes.intersection(set(genes_high)))
genes_low=cur_cluster[cur_cluster<threshlow].index
genes_low=list(xtable_genes.intersection(set(genes_low)))
lookup_entrez_high=[int(x) for x in np.unique(Xtable.loc[genes_high].dropna()['EntrezGene ID'])]
lookup_entrez_low=[int(x) for x in np.unique(Xtable.loc[genes_low].dropna()['EntrezGene ID'])]
# 'p_' means "pvalue". 'fdr_bh' is the multipletest method we are currently using.
indexlist=[]
if len(lookup_entrez_high)>2:
goea_results_high = goeaobj.run_study(lookup_entrez_high)
for i in range(len(goea_results_high)):
if goea_results_high[i].p_fdr_bh<fdr_thresh:
df_GO[i]=[-np.log10(goea_results_high[i].p_fdr_bh)]
indexlist.append(goea_results_high[i].name)
highlen=len(goea_results_high)
else:
highlen=0
if len(lookup_entrez_low)>2:
goea_results_low = goeaobj.run_study(lookup_entrez_low)
for j in range(len(goea_results_low)):
if goea_results_low[j].p_fdr_bh<fdr_thresh:
df_GO[j+highlen+1]=[np.log10(goea_results_low[j].p_fdr_bh)]
indexlist.append(goea_results_low[j].name)
if(np.shape(df_GO)[0]==0):
df_GO[0]=[0]
df_GO.index=['NoGO']
else:
df_GO=df_GO.T
df_GO.index=indexlist
df_GO.columns=[cluster]
df_GO=df_GO.groupby(df_GO.index).first()
if count==0:
df_bigGO=df_GO
count=1
else:
df_bigGO=df_bigGO.merge(df_GO,how='outer',left_index=True,right_index=True)
df_bigGO=df_bigGO.fillna(0)
if 'NoGO' in df_bigGO.index:
df_bigGO=df_bigGO.drop('NoGO')
return df_bigGO
def TOPN2GO(df_p,background,num_genes=100,fdr_thresh=0.1,species='human'):
#go analysis
path2db='Path2obo/db/'
obodag = GODag(path2db+"go-basic.obo")
if (species=='human'):
geneid2gos = read_ncbi_gene2go(path2db+"gene2go", taxids=[9606])
print("{N:,} annotated genes".format(N=len(geneid2gos)))
these_genes = background
Xtable=pd.read_csv('PATH2xfref/hg19_xref.txt',sep='\t')
Xtable.index=Xtable['Approved Symbol']
entrez=[int(x) for x in np.unique(Xtable.loc[these_genes].dropna()['EntrezGene ID'])]
elif(species=='mouse'):
geneid2gos = read_ncbi_gene2go(path2db+"gene2go", taxids=[10090])
print("{N:,} annotated genes".format(N=len(geneid2gos)))
these_genes = background
Xtable=pd.read_csv('PATH2xref/biomart_xref.mm10.txt',sep='\t')
Xtable=Xtable[['Associated Gene Name','EntrezGene ID']].dropna()
Xtable.index=Xtable['Associated Gene Name']
entrez=[int(x) for x in np.unique(Xtable.loc[these_genes].dropna()['EntrezGene ID'])]
goeaobj = GOEnrichmentStudy(
        entrez, # background gene list (Entrez IDs)
geneid2gos, # geneid/GO associations
obodag, # Ontologies
propagate_counts = False,
alpha = 0.05, # default significance cut-off
        methods = ['fdr_bh']) # default multipletest correction method
    df_bigGO=pd.DataFrame()
import math
import multiprocessing
import time
from functools import lru_cache, partial
from multiprocessing import Pool
import pandas as pd
from numpy.random import shuffle
from retry.api import retry_call
from ..mongodb import get_db
from ..scripts.trading_calendar import is_trading_day
from ..setting.constants import MAX_WORKER
from ..utils import batch_loop, data_root, ensure_dtypes, make_logger
from ..utils.db_utils import to_dict
from ..websource.wy import fetch_cjmx
logger = make_logger('成交明细')
DATE_FMT = r'%Y-%m-%d'
def _last_5():
"""最近的5个交易日"""
db = get_db()
try:
return db['交易日历'].find_one()['last_month'][-5:]
except Exception:
today = pd.Timestamp('today').normalize()
dates = pd.date_range(today - pd.Timedelta(days=5), today)
return [d.to_pydatetime() for d in dates]
def _wy_fix_data(df):
dts = df.日期.dt.strftime(DATE_FMT) + ' ' + df.时间
df['成交时间'] = pd.to_datetime(dts)
del df['时间']
del df['日期']
df = df.rename(columns={'价格': '成交价', '涨跌额': '价格变动', '方向': '性质'})
df = ensure_dtypes(df,
d_cols=['成交时间'],
s_cols=['股票代码', '性质'],
i_cols=['成交量'],
f_cols=['成交价', '成交额'])
    # round to 2 decimal places
df = df.round({'价格变动': 2, '成交额': 2, '成交价': 2})
df.fillna(0.0, inplace=True)
return df
def batch_refresh(codes, timestamp):
db = get_db('cjmx')
date_str = timestamp.strftime(DATE_FMT)
collection = db[date_str]
if collection.estimated_document_count() == 0:
create_index(collection)
status = {}
for code in codes:
try:
df = retry_call(fetch_cjmx, [code, date_str],
delay=0.3,
tries=3,
logger=logger)
if not df.empty:
df = _wy_fix_data(df)
collection.insert_many(to_dict(df))
                logger.info(f'stock {code} {date_str}: {len(df):>5} rows')
status[code] = True
except Exception as e:
            logger.info(f'stock {code} date {date_str}: {e!r}')
status[code] = False
failed = [k for k, v in status.items() if not v]
if len(failed):
        logger.warning(f'{date_str}: failed to fetch transaction details for the following stocks')
logger.warning(failed)
return len(failed) == 0
def was_traded(db, code, timestamp):
collection = db[code]
filter = {'日期': timestamp, '成交量': {'$gt': 0}}
if collection.find_one(filter, {'_id': 1}):
return True
else:
return False
@lru_cache(None)
def get_traded_codes(timestamp):
"""当天交易的股票代码列表"""
db = get_db('wy_stock_daily')
codes = db.list_collection_names()
return [code for code in codes if was_traded(db, code, timestamp)]
def completed_codes(timestamp):
"""已经下载的股票代码"""
db = get_db('cjmx')
collection = db[timestamp.strftime(DATE_FMT)]
return collection.distinct('股票代码')
def _refresh(timestamp):
"""刷新指定日期成交明细数据(只能为近5天)"""
t_codes = get_traded_codes(timestamp)
d_codes = completed_codes(timestamp)
codes = list(set(t_codes).difference(set(d_codes)))
if len(codes) == 0:
return True
shuffle(codes)
    logger.info(f'{timestamp.strftime(DATE_FMT)}: fetching {len(codes)} stocks')
    completed = batch_refresh(codes, timestamp)
return completed
def refresh(timestamp):
"""刷新指定日期成交明细数据(只能为近5天)"""
for i in range(1, 4):
logger.info(f"第{i}次尝试 {timestamp}")
completed = _refresh(timestamp)
if completed:
break
def create_index(collection):
collection.create_index([("成交时间", -1)], name='dt_index')
collection.create_index([("股票代码", 1)], name='code_index')
def refresh_last_5():
"""刷新最近5天成交明细"""
tdates = [ | pd.Timestamp(d) | pandas.Timestamp |
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.dates as mdates
import math
### PORTFOLIO
# 24% EQUITY
# 18% FIXED INCOME
# 19% GOLD
# 18% COMDTY TREND/GLOBAL MACRO
# 21% LONG VOL
### EQUITY
# 80% GLOBAL, 19.2% of tot
# 20% EM, 4.8% of tot
### FIXED INCOME
# US TSY 50%, 9% of tot
# Corp bonds, 25% 4.5% of tot
# EM BONDS, 25%, 4.5% of tot
### GOLD
# GLD 90%, 17.1% of tot
# GDX 10%, 1.9 of tot
### COMDTY TREND+GLOBAL MACRO
# LYNX 75%, 13.5% of tot
# SEB ASSET SELECTION C LUX 25%, 4.5% of tot
# IPM SYSTEMATIC MACRO UCITS 0%
# NORDKINN 0%
### LONG VOL
# AMUNDI 100%, 21% of tot
# LATEST DATE (START MONTH) 2007-11, AMUNDI
### GLOBAL VARIABLES / DATA ###
start_date = '2007-11'
years = 13.167
global_data_raw = pd.read_csv('MSCI_World_SEK.csv')
global_data_raw = global_data_raw.set_index('Date')
global_data = pd.DataFrame(global_data_raw.loc[start_date:])
# us_data_raw = pd.read_csv('SPP_aktiefond_USA.csv')
# us_data_raw = us_data_raw.set_index('Date')
# us_data = pd.DataFrame(us_data_raw.loc[start_date:])
# avanza_zero_raw = pd.read_csv('Avanza_zero.csv')
# avanza_zero_raw = avanza_zero_raw.set_index('Date')
# avanza_zero = pd.DataFrame(avanza_zero_raw.loc[start_date:])
em_data = pd.read_csv('MSCI_EM_SEK.csv')
em_data = em_data.set_index('Date')
em_stock = pd.DataFrame(em_data.loc[start_date:])
'''
@author: <NAME>
@date: 7/16/2019
@title: HNSCC Functional Data Analysis Pipeline Library
The purpose of this script is to provide the resources to enable an automated
analysis pipeline of functional drug response data in the HNSCC project at
OHSU led by <NAME> and <NAME>.
'''
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import seaborn as sbn
import statsmodels.api as sm
import statsmodels
import os
import shutil
from sklearn.preprocessing import PolynomialFeatures
import warnings
from openpyxl import Workbook, load_workbook
def parse_pathname(path, verbose=False, path_sep='/'):
'''
parse relevant information from data path, must return
- lab_id [unique 5 digit patient identifier]
- norm [Blank490, 490]
- version_id <str>
file name must be in form:
lab_id=XXXXX-norm=XXXXXX-plate_version_id=XXXXX.xlsx
'''
if verbose: print('path', path)
name = path.split(path_sep)[-1][:-5].split('-')
labels = [n.split('=')[0] for n in name]
values = [n.split('=')[-1] for n in name]
assert len(labels) == 4, 'parsing error: atypical pathname - labels require: lab_id, norm, version_id, note - got: ' + str(labels)
assert len(values) == 4, 'parsing error: atypical pathname - values require 4 inputs for each label. - got:' + str(values)
    lab_id, norm, version_id, notes = values
if verbose: print('---------------------------------------------------------')
if verbose: print( 'please double check file name parsing is accurate')
if verbose: print( 'file name: %s' %path.split(path_sep)[-1])
if verbose: print('lab_id: %s' %lab_id)
if verbose: print('norm: %s' %norm)
if verbose: print('version_id: %s' %version_id)
if verbose: print('notes: %s' %notes)
if verbose: print('---------------------------------------------------------')
return values
def get_plate_map(map_path, verbose=False):
'''
get the plate mapping data from the excel plate map
input
map_path <str> local or absolute path to the plate map
output
<dataframe> plate mapping data. header = [plate_number, row, col, drug, conc, version_id]
'''
#print('map path:', map_path)
book = load_workbook(map_path, data_only=True)
sheets = list(book.sheetnames)
sheets.pop(0) # remove 'meta'
#print()
#print('sheets:', sheets)
meta = pd.read_excel(map_path, header=None, sheet_name='meta')
plate_version = meta[meta[0]=='version_id'].values[0][1]
num_sheets = int(len(sheets) / 2)
if verbose: print('plate version id is: %s' %plate_version)
if verbose: print('There are %d plates in this plate map' %num_sheets)
#map_data = pd.read_excel(map_path, header=0, sheet_name=list(range(1,((num_sheets*2)+1))))
#for key in map_data:
# print('key', key)
# print(map_data[key])
# print(map_data[key].columns)
# print('#########################################################')
plates = []
for i,p in enumerate(range(0, num_sheets*2, 2)):
#? Really odd behavior with excel:
#! This only started occuring AFTER I implemented the plate_map renaming script. Now, when I read in the values (seems to be just for conc sheets)
#! The formula cells are read in as NA, by changing: , na_values = None, keep_default_na = False , we can prevent this for cells, however we
#! still have to rename the header, for some reason it doesn't get handled the same...
#! SOLVED: The openpyxl does not evaluate formulas, so when the renaming script saves the new excel plate maps, it has unevaluated equations.
#! to fix this issue, after running the renaming script, you have to open each excel file, go to each tab (should autocalculate) and THEN run this.
#! If you don't do this, it will result in a ton of NA values.
#! Solved better: when the renaming script opens the (backup) original platemaps, it loads in ONLY the data. So all equations are lost in downstream versions, however, issue solved.
conc_sheet = sheets[p]
inhib_sheet = sheets[p+1]
#print('----')
#print('conc', conc_sheet)
#print('inhib', inhib_sheet)
#print('----')
conc_raw = pd.read_excel(map_path, header=0, sheet_name=conc_sheet, na_values = None, keep_default_na = False)
#conc_raw.rename(columns={o:n for o,n in zip(conc_raw.columns, ['row'] + list(range(1,25)))}, inplace=True)
#print(conc_raw)
inhib_raw = pd.read_excel(map_path, header=0, sheet_name=inhib_sheet, na_values = None, keep_default_na = False)
#inhib_raw.rename(columns={o:n for o,n in zip(inhib_raw.columns, ['row'] + list(range(1,25)))}, inplace=True)
#print(inhib_raw)
concs = pd.melt( conc_raw, id_vars=['row'], var_name='col', value_name='conc')
drugs = pd.melt( inhib_raw, id_vars=['row'], var_name='col', value_name='inhibitor')
plate_map = concs.merge(drugs, on=['row','col'], how='outer')
plate_map = plate_map.assign(map_version_id = plate_version, plate_number=i+1)
plates.append(plate_map)
assert len(plates) == num_sheets, f'should be {num_sheets} plates, but only saw {len(plates)}'
dat = plates[0]
for p in plates[1:]:
dat = dat.append(p, ignore_index=True)
#print('number of concentration NA:', dat.conc.isna().sum())
#print('number of inhibitors NA:', dat.inhibitor.isna().sum())
assert dat.shape[0] == num_sheets*24*16, f'expected {num_sheets*24*16} non-zero rows from plate-map (based on {num_sheets} plates), but only got {dat.shape[0]} rows.'
return dat
class panel:
'''
This class represents a single HNSCC drug panel.
'''
def __init__(self, plate_path, platemap_dir, verbose=True, path_sep='/'):
'''
parse path name, load plate data, load plate map, check positive control
'''
self.plate_path = plate_path
self.platemap_dir = platemap_dir
self.verbose = verbose
lab_id, norm, version_id, notes = parse_pathname(plate_path,verbose=verbose, path_sep=path_sep)
self.msg_log = ''
self._log('\n--------------------------------------------------------------------------\nThis is the message log for: \n\tlab_id: %s\n\tversion_id: %s\n\tnotes: %s\n--------------------------------------------------------------------------' %(lab_id, version_id, notes))
assert len(lab_id) <= 5, 'lab_id is expected to be 5 or less characters long'
if len(lab_id) < 5:
print('Warning: lab_id is less than 5 characters long; appending 0 prefix')
lab_id = '0'*(5-len(lab_id)) + lab_id
self.lab_id = lab_id
#assert version_id in ['OHSU_HNSCC_derm001', 'OHSU_HNSCC_derm002'], 'unknown platemap ID, please double check pathname or update script with acceptable plate IDs'
self.version_id = version_id
assert norm in ['blank490', '490', 'Blank490'], 'improper pathname value for "norm" [blank490, 490]'
self.blank490 = (norm in ['blank490', 'Blank490'])
self._log('Data has already been normalized by positive controls: %s' %str(self.blank490))
self.notes = notes
self.platemap_path = self.platemap_dir + 'HNSCC_plate_map-version_id=' + self.version_id + '.xlsx'
self.platemap = get_plate_map(self.platemap_path)
self._raw = self._get_plate_data()
def _get_plate_data(self):
'''
each plate has 16 rows of drug data, then a empty row, then the next plate. Varying number of plates
file name specifies if the spectrometer software has already normalized the data by positive controls
(eg. set optical_density = 0 when all cells are dead). This is noted by norm='Blank490'
To double check this however, we can make sure that there is a 26th column of the raw plate data
which is filled with 'blank490' if this is the case and empty otherwise.
inputs
None
outputs
dataframe
'''
allplates = pd.read_excel(self.plate_path, header=None)
nplates = (allplates.shape[0] + 1) / 18
#print('>>>>>>>>>>>>>>>>> number of plates (in data):', nplates)
assert nplates%1.0 == 0, 'atypical number of rows, check data format'
self._log('This assay has %d plates' %int(nplates))
plates = []
i = 1 # skip original header
warned = False
for p in range(int(nplates)):
dat = pd.DataFrame( allplates.values[i:(i+16),:])
# check for blank490 column ... confidence in proper documentation
if dat.shape[1] < 26:
if not warned: self._log('WARNING: This assay [lab_id=%s,notes=%s] does not have a "blank490" column (last col), please double check that the data has been normalized by the positive controls.' %(self.lab_id,self.notes))
warned = True
if self.blank490: self._log('WARNING: file name specifies blank490, but data does not appeaer to be postive control normalized.')
self.blank490=False
dat = dat.assign(plate_row = dat[0]).assign(norm_type = 'none', plate_num = p+1, lab_id = self.lab_id, assay_version_id=self.version_id, note=self.notes).drop(labels = [0], axis='columns')
else:
dat = dat.assign(plate_row = dat[0]).assign(norm_type = dat[25], plate_num = p+1, lab_id = self.lab_id, assay_version_id=self.version_id, note=self.notes).drop(labels = [0,25], axis='columns')
dat = pd.melt(dat, id_vars=['lab_id', 'norm_type', 'plate_num', 'plate_row','assay_version_id', 'note'], value_vars=None, var_name='plate_col', value_name='optical_density', col_level=None)
plates.append( dat )
i += 16 + 2 # skip empty row + header
df = plates[0]
for p in plates[1:]:
df = df.append(p, ignore_index=True)
'''
print('>>>>>>>>>>>>>>>>>>> number of rows in data:', df.shape[0])
print(df.head())
print('unique rows', df.plate_row.unique())
print('unique columns', df.plate_col.unique())
print('unique plate numbers:', df.plate_num.unique())
print('OD # NA:', df.optical_density.isna().sum())
print('compare to platemap data')
print(self.platemap.head())
print('unique rows', self.platemap.row.unique())
print('unique columns', self.platemap.col.unique())
print('unique plate numbers:', self.platemap.plate_number.unique())
'''
return df
def _log(self, msg):
        '''
        Print (if verbose) and append a timestamped message to this panel's log.
        '''
if self.verbose: print(msg)
self.msg_log += '>> ' + str(dt.now()) + ': ' + msg + '\n'
def map_data(self):
        '''
        Merge the raw optical-density readings with the plate map (inhibitor and
        concentration per well), drop unmapped wells, and strip whitespace from
        inhibitor names.
        '''
self._log('mapping data... [%s]' %self.version_id)
self.data = self._raw.merge(self.platemap, how='left', left_on=['plate_row','plate_col','plate_num'], right_on=['row','col','plate_number']).drop(['row','col','plate_number'], axis='columns')
self.data = self.data[~self.data.inhibitor.isna()]
self.data = self.data[~self.data.conc.isna()]
# remove any leading or trailing spaces
self.data = self.data.assign(inhibitor = [x.strip() for x in self.data.inhibitor])
self._log('mapping complete. \n\t mapped data shape: %s\n\t column names: [%r]' %(str(self.data.shape), self.data.columns.tolist()))
def normalize_combinationagent_concentrations(self):
'''
normalize combination drug assays concentrations by mapping into euclidean distance.
conc_norm = ( conc1**2 + conc2**2 )**(0.5) if combination_agent else conc
'''
self._log('normalizing combination agent concentrations...')
self.data = self.data.assign(iscomb= [';' in str(conc) for conc in self.data['conc']])
#! Remove this when done testing
'''
for i,x in self.data.iterrows():
try:
np.sqrt(np.sum([float(y)**2 for y in str(x.conc).split(';')]))
except:
print('failed')
print(x)
raise
'''
        self.data = self.data.assign(conc_norm = [np.sqrt(np.sum([float(y)**2 for y in str(x).split(';')])) for x in self.data['conc']])
def normalize_cell_viability_by_negative_controls(self):
'''
The zero value (positive control) of optical density is set by the p.spec software [blank490] but to interpret assay value as cell viability, we need to scale optical density by plate controls:
Cell_viab = opt_dens / plate_average_control (PAC)
[This sets PAC control value as cell viability of 1]
'''
self._log('normalizing cell viability by negative controls...')
# double check the data type is numeric
self.data = self.data.assign(optical_density = [float(x) for x in self.data.optical_density])
# get plate average controls
PAC = self.data[self.data['inhibitor'] == 'NONE'].groupby(by='plate_num')['optical_density'].mean().to_frame().assign(PAC = lambda x: x.optical_density).drop(['optical_density'], axis='columns')
self._log('plate average controls: \n%s' %str(PAC))
self.data = self.data.merge(PAC, how='left', on='plate_num')
self.data = self.data.assign(cell_viab = [od / pac for od, pac in zip(self.data['optical_density'], self.data['PAC'])])
# set a flag for low_pac (0.03)
self.data = self.data.assign(low_PAC_flag = self.data.PAC < 0.03)
def set_floor(self):
'''
There is still some contention here, Dan's protocol shifts the PAC up by the smallest value on plate.
[and therefore this step would need to come before negative control normalization]
I don't think that's a good idea though, rather, I'm going to round any negatives up to 0. These will be marked with 'is_adj'
FOR NOW - talk to the group
'''
self.data = self.data.assign(is_adj = self.data.cell_viab < 0, cell_viab = [0 if cv < 0 else cv for cv in self.data.cell_viab] )
def replicate_QC(self, method=['within','across'], flag_threshold = 1):
'''
linear regression model fit to each within-plate replicate separately
and AUC values are calculated. Replicates with AUC differences greater
than 100 are flagged. ?flag_name? [TODO] replicates are not normalized,
future probit calculations should use all data to fit the probit curve.
inputs
flag_threshold <float> : threshold to set flag for QC removal
method <str> : whether to average within plate or across plates.
outputs
none
'''
### DEPRECATED ### - not yet
def avg_plate_replicates(self, method=['within','across'], flag_threshold = 1):
'''
A ‘curve-free’ AUC (integration based on fine linear interpolation between
the 7 data points themselves) was calculated for those runs with within-panel
replicates after applying a ceiling of 1 for the normalized_viability.
The maximum change in AUC amongst the replicates was noted and those runs
with differences > 1 were removed.
Across-plate replicates are fit with linear regression and replicates with AUC differences > 0.75
are removed. Other replicates are averaged.
The HNSCC plates don't appear to have any within plate replicates, so this is an essentially unused method.
As such, it has been only lightly tested. Be wary of performance and robustness.
inputs
flag_threshold <float> : threshold to set flag for QC removal
method <str> : whether to average within plate or across plates.
outputs
none
'''
#warnings.warn('This method is deprecated and will be removed in future release. Use `replicate_QC()`` in the future.')
n = 0
replicates = []
toflag = []
fitdat = self.data[~self.data['inhibitor'].isin(['NONE', 'F-S-V', 'DMSO'])]
if method == 'within':
avgd_obs = []
self._log('averaging within plate replicates...')
for plate in set(fitdat['plate_num'].values):
platedat = fitdat[fitdat['plate_num'] == plate]
for inhib in set(platedat['inhibitor'].values):
assay = platedat[platedat['inhibitor'] == inhib]
if assay.shape[0] > 7:
n+=1
aucs = []
# groupby row - TODO: should double check that no replicates are on same row
for row in set(assay.plate_row):
repl = assay[assay['plate_row']==row]
aucs.append(self._get_lin_auc(repl))
if max(aucs) - min(aucs) > flag_threshold: toflag.append(inhib)
replicates.append(inhib)
                        new_obs = assay.groupby(['lab_id','inhibitor','conc_norm', 'plate_num'])['cell_viab'].mean().to_frame().reset_index(level=['lab_id', 'inhibitor', 'conc_norm', 'plate_num']).assign(within_plate_repl_flag = (max(aucs) - min(aucs)) > flag_threshold)
avgd_obs.append(new_obs)
self._log('There were %d within plate replicates [%r]' %(n, replicates))
self._log('within plate replicates flagged: %r' %toflag)
if len(replicates) > 0:
# set is_repl flag, signifies that it has been averaged
repls = pd.DataFrame({'inhibitor': replicates, 'is_within_plate_repl': [True]*len(replicates)})
self.data = self.data.merge(repls, how='left', on='inhibitor').assign(is_within_plate_repl = lambda x: [True if f else False for f in x.is_within_plate_repl])
# then add the averaged replicate observation to the bottom of dataframe
for nobs in avgd_obs:
self.data = self.data.append(nobs, ignore_index=True)
else:
self.data = self.data.assign(is_within_plate_repl = False)
# add auc diff flag for removal
if (len(toflag) > 0):
                self.data = self.data.assign(within_plate_repl_flag = [inhib in toflag for inhib in self.data.inhibitor])
            else:
                self.data = self.data.assign(within_plate_repl_flag = False)
# ------------- Across plate replicate averaging VVV ------------------
elif method == 'across':
self._log('averaging across plate replicates...')
avgd_obs = []
replicates = []
for inhib in set(fitdat['inhibitor'].values):
assay = fitdat[fitdat['inhibitor'] == inhib]
#print('assay shape: %s' %str(assay.shape))
if assay.shape[0] > 7:
n+=1
aucs = []
# groupby row - TODO: should double check that no replicates are on same row
for plate in set(assay.plate_num):
repl = assay[assay['plate_num']==plate]
aucs.append(self._get_lin_auc(repl))
self._log('[%s] linear AUCs: %r -- range: (%.2f, %.2f)' %(inhib, aucs, min(aucs), max(aucs)))
if (max(aucs) - min(aucs)) > flag_threshold: toflag.append(inhib)
replicates.append(inhib)
new_obs = assay.groupby(['lab_id','inhibitor','conc_norm'])['cell_viab'].mean().to_frame().reset_index(level=['lab_id', 'inhibitor', 'conc_norm']).assign(across_plate_repl_flag = (max(aucs) - min(aucs)) > flag_threshold)
avgd_obs.append(new_obs)
self._log('There were %d across plate replicates [%r]' %(n, replicates))
self._log('across plate replicates flagged: %r' %toflag)
if len(replicates) > 0:
# set is_repl flag, signifies that it has been averaged
repls = pd.DataFrame({'inhibitor': replicates, 'is_across_plate_repl': [True]*len(replicates)})
self.data = self.data.merge(repls, how='left', on='inhibitor')
self.data = self.data.assign(is_across_plate_repl = lambda x: [False if f != True else True for f in x.is_across_plate_repl])
# then add the averaged replicate observation to the bottom of dataframe
for nobs in avgd_obs:
self.data = self.data.append(nobs, ignore_index=True, sort=False)
else:
self.data = self.data.assign(is_across_plate_repl = False)
if (len(toflag) > 0):
                self.data = self.data.assign(across_plate_repl_flag = [inhib in toflag for inhib in self.data.inhibitor])
            else:
                self.data = self.data.assign(across_plate_repl_flag = False)
else:
raise 'choose a proper averaging method [within, across] plates'
def _get_rectangle_auc(self, x, y):
'''
This is used when regression fit fails due to perfect separation. Using left-shifted rectangles.
x should be log10 transformed concentration
y should be cell viab [0,1]
'''
delta = (np.max(x) - np.min(x))/(len(y)-1)
auc = np.sum([delta * yy for yy in y[1:]])
#print(f'rectangular auc calc: %s' %auc)
return auc
def _get_lin_auc(self, df, plot=False, return_fig=False):
'''
fit a linear regression to the data and calculate auc
inputs
df <dataframe> 7 points to plot
plot <bool> option to plot the fit; for diagnostics
outputs
auc <float> area under the curve
'''
assert df.shape[0] == 7, f'There should only be 7 observations per replicate, got {df.shape[0]}'
skip_auc_calc=False
x = [np.log10(x) for x in df['conc_norm'].values]
y = df['cell_viab'].values
pr = sm.GLM(y, sm.add_constant(x))
try:
glm_res = pr.fit()
except ValueError:
try:
glm_res = pr.fit(method='newton')
except sm.tools.sm_exceptions.PerfectSeparationError:
auc = self._get_rectangle_auc(x,y)
self._log('Perfect separtion in linear regression: auc calculated by rectangular approximation. AUC=%.3f' %auc)
skip_auc_calc=True
# AUC calculation -----------------------------------------------------
# left rectangle auc estimate
if not skip_auc_calc:
delta = 0.001
x2 = np.arange(np.log10(min(df['conc_norm'].values)), np.log10(max(df['conc_norm'].values)), delta)
yhat = glm_res.predict(sm.add_constant(x2))
auc = np.sum(yhat*delta)
f = plt.figure(figsize = (10,10))
plt.title('Linear Regression [AUC=%.2f]' %auc)
plt.plot(x, y, 'ro', label='replicate')
        if not skip_auc_calc: plt.plot(x2, yhat, 'g-', label='fit')
plt.legend()
if plot: plt.show()
if return_fig: return auc, f
else:
plt.close(f)
return auc
def set_ceiling(self):
'''
Apply ceiling of 1
(Dan’s protocol uses 100 – note for AUC threshold adjustments)
inputs
none
outputs
none
'''
self._log('Applying a ceiling of 1 to cell viability...')
self.data.loc[(self.data.cell_viab > 1) & (~self.data.inhibitor.isin(['NONE','DMSO','F-S-V'])), 'cell_viab'] = 1
#self.data = self.data.assign(cell_viab = [1 if (cv > 1 and inhib not in ['NONE','DMSO','F-S-V']) else cv for cv, inhib in self.data[['cell_viab', 'inhibitor']].values])
def fit_probit(self, inhib, x, y, df, res, failures, plot=True):
        '''
        Fit a probit GLM of cell viability on log10(concentration) for a single
        inhibitor, compute the AUC, save the dose-response plot and model summary,
        and append the fit statistics to res. Falls back to a flat (range x viability)
        or linear-fit AUC estimate when perfect separation prevents the probit fit.
        '''
try:
pr = sm.GLM(y, sm.add_constant(x), family=sm.families.Binomial(link=sm.families.links.probit()))
glm_res = pr.fit(disp=False)
auc, x2, yhat = self.calculate_auc(x, glm_res)
f, ax = plt.subplots(1,1, figsize=(10,10))
plt.title('inhib: %s [AUC= %.2f]' %(inhib, auc))
plt.plot(x2, yhat, 'r-', label='probit_fit')
plt.plot(x, y, 'bo', label='replicates')
plt.legend()
if plot: plt.show()
self._save_plot(f, inhib)
plt.close(f)
# beta0 = intercept
# beta1 = slope
intercept,beta1 = glm_res.params
probit_AIC, probit_BIC = glm_res.aic, glm_res.bic
probit_Deviance = glm_res.deviance
probit_pval = glm_res.pvalues
self._write_summary_to_file(glm_res.summary(), 'probit', inhib)
# update results
[res[var].append(val) for var,val in zip(['inhibitor','intercept', 'beta1', 'auc', 'prob_conv', 'prob_AIC', 'prob_BIC', 'prob_deviance', 'prob_pval'], [inhib, intercept ,beta1 , auc, True, probit_AIC, probit_BIC, probit_Deviance, probit_pval])]
except statsmodels.tools.sm_exceptions.PerfectSeparationError as e:
'''
Perfect separation occurs when all cell_viab values are identical.
This most commonly occurs with cell_viab = 1
'''
self._log('Perfect separation has occured during probit fitting [%s]\n\t cell_viab values: %r' %(inhib, df['cell_viab'].values))
cv = df['cell_viab'].unique()
#assert len(cv) == 1, 'perfect separation has occured, however, there are multiple cell_viab values. Please investigate issue.'
if (len(cv) == 1):
self._log('cell_viab values identical; AUC will be calculated by conc-range * cell_viab')
cv = cv[0]
auc = cv * (np.log10(max(df['conc_norm'])) - np.log10(min(df['conc_norm'])))
else:
self._log('cell_viab values non-identical; AUC valule will be calculated by linear regression.')
auc, f = self._get_lin_auc(df, plot=plot, return_fig=True)
self._save_plot(f, inhib)
plt.close('all')
[res[var].append(val) for var,val in zip(['inhibitor','intercept', 'beta1', 'auc', 'prob_conv', 'prob_AIC', 'prob_BIC', 'prob_deviance', 'prob_pval'], [inhib, None, None, auc, False, None, None, None, None])]
except:
#print('some other exception...')
failures.append( inhib )
#[res[var].append(val) for var,val in zip(['inhibitor','intercept', 'beta1', 'auc'], [inhib, 'NA' ,'NA' , 'NA'])]
if plot:
f, ax = plt.subplots(1,1, figsize = (10,10))
ax.set_title('FAILURE: %s' %(inhib))
#sbn.scatterplot(x=x2, y=yhat , ax=ax)
plt.xscale('log')
sbn.scatterplot(x=x, y=y, ax=ax)
plt.show()
raise
def fit_poly(self, inhib, x, y, res, failures, degree=5, plot=True):
        '''
        Fit a polynomial GLM (default degree 5) of cell viability on log10(concentration)
        as an over-fit reference curve, save the plot and model summary, and append
        the fit statistics to res.
        '''
try:
xp = PolynomialFeatures(degree=degree).fit_transform(x.reshape((-1,1)))
pr = sm.GLM(y, xp)
poly_res = pr.fit(disp=False)
x2 = np.arange(min(x), max(x), 0.01)
x2p = PolynomialFeatures(degree=degree).fit_transform(x2.reshape((-1,1)))
yhat = poly_res.predict(sm.add_constant(x2p))
f, ax = plt.subplots(1,1, figsize=(10,10))
plt.title('POLY FIT (degree=%d) - inhib: %s' %(degree, inhib))
plt.plot(x2, yhat, 'r-', label='probit_fit')
plt.plot(x, y, 'bo', label='replicates')
plt.legend()
if plot: plt.show()
self._save_plot(f, inhib, suffix='-poly5')
plt.close(f)
poly_AIC, poly_BIC = poly_res.aic, poly_res.bic
poly_Deviance = poly_res.deviance
poly_pval = poly_res.pvalues
self._write_summary_to_file(poly_res.summary(), 'poly5', inhib)
# update results
[res[var].append(val) for var,val in zip(['inhibitor', 'poly_degree', 'poly_AIC', 'poly_BIC', 'poly_deviance', 'poly_pval'], [inhib, degree, poly_AIC, poly_BIC, poly_Deviance, poly_pval])]
except ValueError as e:
self._log('WARNING! [%s] - %s' %(inhib, str(e)))
except:
            print('Unexpected failure during polynomial fit for %s' %inhib)
failures.append( inhib )
raise
def _write_summary_to_file(self, summary, model_type, inhib, output_dir='../output/'):
        '''
        Write a fitted model summary to a text file in the per-inhibitor
        dose-response output directory.
        '''
dirout = './%s/%s' %(output_dir, self.plate_path[:-5].split('/')[-1])
with open('%s/dose-response-plots/%s/GLM_%s_summary.txt' %(dirout, inhib, model_type), 'w') as f:
f.write(str(summary))
def calculate_auc(self, x, model, delta=0.001):
'''
left rectangle auc estimate. Note that x is already in log10 space.
inputs
x <numpy array> endogenous variable
model <sm.GLMmodel.results> model
delta <float> the rectangular width to use for each rectangle; smaller = more accurate
outputs
auc
'''
x2 = np.arange(min(x), max(x), delta)
yhat = model.predict(sm.add_constant(x2))
return np.sum(yhat*delta), x2, yhat
def fit_regressions(self, plot=True):
'''
A probit regression was fit to all possible run groups using the model:
        $$\frac{normalized\_viability}{100} \sim 1 + \log_{10}(concentration)$$
For all groups there were N=7 dose-response measurements and the following statistics were generated:
- Regression coefficients labeled as ‘intercept’ and ‘beta’ (slope)
- Tests of significance for the slope coefficient: *z-statistic* [beta_z] and *p-value* [beta_p]
- model AIC
- Summary measures of fit from residuals (Hosmer & Lemeshow 2000)
- Pearsons chi square
- Deviance
- Indication of whether model converged - *fitting method and convergence threshold?*
- Otherwise values are from last iteration of fitting alg.
- AUC
- Area under the curve from numerical integration
- no_curve_auc
- Auc from a ‘no-curve’ approach as described in the ‘Preprocessing’ section.
- linsp_ic50 and linsp_auc
- IC50 and AUC from an ‘overfit’ curve using a linear 5th degree spline
inputs
none
outputs
none
'''
self._log('fitting probit regressions...')
failures = []
probit_res = {x:[] for x in ['inhibitor','intercept', 'beta1', 'auc', 'prob_conv', 'prob_AIC', 'prob_BIC', 'prob_deviance', 'prob_pval']}
poly_res = {x:[] for x in ['inhibitor', 'poly_degree', 'poly_AIC', 'poly_BIC', 'poly_deviance', 'poly_pval']}
i = 0
# Filter controls
pat_dat = self.data[~self.data['inhibitor'].isin(['NONE', 'F-S-V', 'DMSO'])]
# Filter replicates that have been averaged into new observation
pat_dat = pat_dat[pat_dat['is_across_plate_repl'] != True]
pat_dat = pat_dat[pat_dat['is_within_plate_repl'] != True]
ntofit = int(pat_dat.shape[0] / 7)
self._log('number of assays to fit: %d' %ntofit)
for inhib in set(pat_dat['inhibitor'].values):
if i%1==0: print('\t\tFitting regressions...Progress: %.1f%% \t[%d/%d]' %(i/ntofit*100, i, ntofit), end='\t\t\t\t\t\r')
i+=1
df = pat_dat[pat_dat['inhibitor'] == inhib]
#! Why is this hanging???
#TODO: Go through the across plate replicate averaging and figure this problem out.
#assert df.shape[0] == 7, f"should have exactly 7 doses! [has {df.shape[0]}]\n{df[['lab_id', 'inhibitor', 'cell_viab', 'is_across_plate_repl', 'is_within_plate_repl', 'panel_number']]}"
x = np.log10( df['conc_norm'].values )
y = df['cell_viab'].values
# probit_res is an object and so should be passed by reference, hence modified inplace
self.fit_probit(inhib, x, y, df, probit_res, failures, plot=plot)
# fit polynomial for comparison to overfitting
self.fit_poly(inhib, x, y, poly_res, failures, plot=plot)
self._log('Failures [%d]: \n\t%r' %(len(failures),failures))
# add probit features
self.data = self.data.merge(pd.DataFrame(probit_res), how='left', on='inhibitor')
# add poly features
        self.data = self.data.merge(pd.DataFrame(poly_res), how='left', on='inhibitor')
"""Main class and helper functions.
"""
import os
from enum import Enum
from collections import OrderedDict
from functools import reduce
from pathlib import Path
from typing import Any, Union, Optional
from typing import Iterable, Sized, Sequence, Mapping, MutableMapping
from typing import Tuple, List, Dict, KeysView
from copy import deepcopy
import numpy as np
from numpy import ma
import pandas as pd
from numpy.lib.recfunctions import rec_drop_fields
from pandas.core.index import RangeIndex
from pandas.api.types import is_string_dtype, is_categorical
from scipy import sparse
from scipy.sparse import issparse
from scipy.sparse.sputils import IndexMixin
from natsort import natsorted
# try importing zarr
try:
from zarr.core import Array as ZarrArray
except ImportError:
class ZarrArray:
@staticmethod
def __rep__():
return 'mock zarr.core.Array'
# try importing zappy
try:
from zappy.base import ZappyArray
except ImportError:
class ZappyArray:
@staticmethod
def __rep__():
return 'mock zappy.base.ZappyArray'
from . import h5py
from .layers import AnnDataLayers
from . import utils
from .utils import Index, get_n_items_idx
from .logging import anndata_logger as logger
from .compat import PathLike
class StorageType(Enum):
Array = np.ndarray
Masked = ma.MaskedArray
Sparse = sparse.spmatrix
ZarrArry = ZarrArray
ZappyArry = ZappyArray
@classmethod
def classes(cls):
print(ZarrArray)
return tuple(c.value for c in cls.__members__.values())
class BoundRecArr(np.recarray):
"""A :class:`numpy.recarray` to which fields can be added using ``.['key']``.
To enable this, it is bound to a instance of AnnData.
"""
_attr_choices = ['obsm', 'varm']
def __new__(cls, input_array: np.ndarray, parent: Any, attr: str):
"""
Parameters
----------
input_array
A (structured) numpy array.
parent
Any object to which the BoundRecArr shall be bound to.
attr
The name of the attribute as which it appears in parent.
"""
arr = np.asarray(input_array).view(cls)
arr._parent = parent
arr._attr = attr
return arr
def __array_finalize__(self, obj: Any):
if obj is None: return
self._parent = getattr(obj, '_parent', None)
self._attr = getattr(obj, '_attr', None)
def __reduce__(self) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
pickled_state = super().__reduce__()
new_state = pickled_state[2] + (self.__dict__, )
return pickled_state[0], pickled_state[1], new_state
def __setstate__(self, state: Sequence[Mapping[str, Any]]):
for k, v in state[-1].items():
self.__setattr__(k, v)
super().__setstate__(state[0:-1])
def copy(self, order='C') -> 'BoundRecArr':
new = super().copy()
new._parent = self._parent
return new
def flipped(self) -> 'BoundRecArr':
new_attr = (self._attr_choices[1] if self._attr == self._attr_choices[0]
else self._attr_choices[0])
return BoundRecArr(self, self._parent, new_attr)
def keys(self) -> Tuple[str, ...]:
return self.dtype.names
def __setitem__(self, key: str, arr: np.ndarray):
if not isinstance(arr, np.ndarray):
raise ValueError(
'Can only assign numpy ndarrays to .{}[{!r}], not objects of class {}'
.format(self._attr, key, type(arr))
)
if arr.ndim == 1:
raise ValueError('Use adata.obs or adata.var for 1-dimensional arrays.')
if self.shape[0] != arr.shape[0]:
raise ValueError(
'Can only assign an array of same length ({}), not of length {}.'
.format(self.shape[0], arr.shape[0])
)
# the following always allocates a new array
# even if the key already exists and dimensions match
# TODO: one could check for this case
# dtype
merged_dtype = []
found_key = False
for descr in self.dtype.descr:
if descr[0] == key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
found_key = True
else:
merged_dtype.append(descr)
if not found_key:
merged_dtype.append((key, arr.dtype, arr.shape[1]))
# create new array
new = np.empty(len(self), dtype=merged_dtype)
# fill the array
for name in new.dtype.names:
if name == key:
new[name] = arr
else:
new[name] = self[name]
# make it a BoundRecArr
# TODO: why can we not do this step before filling the array?
new = BoundRecArr(new, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def __delitem__(self, key: str):
"""Delete field with name."""
if key not in self.dtype.names:
raise ValueError(
'Currently, can only delete single names from {}.'
.format(self.dtype.names)
)
new_array = rec_drop_fields(self, key)
new = BoundRecArr(new_array, self._parent, self._attr)
setattr(self._parent, self._attr, new)
def to_df(self) -> pd.DataFrame:
"""Convert to pandas dataframe."""
df = pd.DataFrame(index=RangeIndex(0, self.shape[0], name=None))
for key in self.keys():
value = self[key]
for icolumn, column in enumerate(value.T):
df['{}{}'.format(key, icolumn+1)] = column
return df
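# --- illustrative sketch (editor addition, hedged): to_df above expands each
# multi-column field into numbered columns ("X_pca" -> "X_pca1", "X_pca2", ...).
# In comment form, the naming step on its own:
#   value = np.arange(6).reshape(3, 2)        # one field with 2 columns
#   df = pd.DataFrame(index=pd.RangeIndex(0, 3))
#   for icolumn, column in enumerate(value.T):
#       df['{}{}'.format("X_pca", icolumn + 1)] = column
#   list(df.columns)                          # -> ['X_pca1', 'X_pca2']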
# for backwards compat
def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and 'of' in key:
return mk
return None
# for backwards compat
def _gen_keys_from_multicol_key(key_multicol, n_keys):
"""Generates single-column keys from multicolumn key."""
keys = [('{}{:03}of{:03}')
.format(key_multicol, i+1, n_keys) for i in range(n_keys)]
return keys
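# --- illustrative sketch (editor addition, hedged): for key_multicol="X_pca"
# and n_keys=3 the helper above yields
#   ['X_pca001of003', 'X_pca002of003', 'X_pca003of003']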
def df_to_records_fixed_width(df, var_len_str=True):
uns = {} # unstructured dictionary for storing categories
names = ['index']
if | is_string_dtype(df.index) | pandas.api.types.is_string_dtype |
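# --- illustrative sketch (editor addition, not part of the corpus rows) ---
# The row above completes with pandas.api.types.is_string_dtype; a small,
# hedged standalone example of that API:
import pandas as pd
from pandas.api.types import is_string_dtype

_str_idx = pd.Index(["a", "b"])        # string-valued index -> True
_int_idx = pd.Index([0, 1, 2])         # integer index -> False
assert is_string_dtype(_str_idx) and not is_string_dtype(_int_idx)
# --- end sketch ---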
import sys
sys.path.append("../ern/")
sys.path.append("../dies/")
import copy
import torch
import numpy as np
import pandas as pd
from dies.utils import listify
from sklearn.metrics import mean_squared_error as mse
from torch.utils.data.dataloader import DataLoader
from fastai.basic_data import DataBunch
from fastai.basic_data import DatasetType
import glob
def to_short_name(file):
return (
file.split("/")[-1]
.replace(".h5", "")
.replace(".csv", "")
.replace(".pkl", "")
.replace(".pth", "")
.replace("_config", "")
)
def create_databunch(
train_ds, val_ds, test_ds, batch_size, device,
):
train_ds.to_device(device)
tr = DataLoader(
train_ds,
batch_size,
drop_last=True,
shuffle=True,
# num_workers=6,
pin_memory=False,
)
val_ds.to_device(device)
val = DataLoader(val_ds, batch_size, pin_memory=False)
if test_ds is not None:
test_ds.to_device(device)
test = DataLoader(test_ds, batch_size, pin_memory=False)
else:
test = None
data_bunch = DataBunch(tr, val, test_dl=test)
return data_bunch
def get_config(file, include_rmse=False):
df = | pd.read_csv(file, sep=",") | pandas.read_csv |
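# --- illustrative sketch (editor addition, not part of the corpus rows) ---
# The row above completes with pandas.read_csv(file, sep=","); the same call
# shape on an in-memory buffer instead of a real config file:
import io
import pandas as pd

_cfg = pd.read_csv(io.StringIO("name,value\nlr,0.01\nbatch_size,64\n"), sep=",")
assert list(_cfg.columns) == ["name", "value"]
# --- end sketch ---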
"""
Miscellaneous data processing utility functions.
"""
import os
import pandas as pd
import geopandas as gpd
import numpy as np
def create_df(file):
"""
Returns a pandas data frame
:param file: CSV file with raw hh listing data
:return:
"""
encodings = ["ISO-8859-1", "latin1"]
for encoding in encodings:
try:
df = pd.read_csv(file, encoding=encoding)
return df
except Exception as e:
continue
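# --- illustrative sketch (editor addition, hedged): create_df above tries each
# encoding in turn and implicitly returns None when every attempt fails; an
# explicit standalone variant of the same fallback would be:
#   def read_with_fallback(path):
#       for enc in ("ISO-8859-1", "latin1"):
#           try:
#               return pd.read_csv(path, encoding=enc)
#           except Exception:
#               continue
#       return None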
def create_df_without_pandas(file):
"""
In case creating the df with pandas fails due to encoding issues,
we try this manual approach instead
:param file: CSV file with raw HH listing data
:return:
"""
data = []
for line in open(file, 'rb'):
try:
# Decode to a fail-safe string for PY3
line = line.decode('latin1')
data.append(line.split(","))
except Exception as e:
pass
try:
# Decode to a fail-safe string for PY3
line = line.decode('utf-8')
data.append(line.split(","))
except Exception as e:
pass
df = pd.DataFrame(data=data[1:], columns=data[0])
return df
def check_if_coordinates_colums_need_fixing(df, cols):
"""
Check whether one of the coordinate columns (passed as a lat, lon pair)
requires fixing because it contains null string values (e.g., #NULL#)
"""
col_to_fix = []
col_okay = []
for l in cols:
if df[l].dtype == np.float64:
col_okay.append(l)
continue
col_to_fix.append(l)
fix_tuple = None
if not col_to_fix:
pass
else:
fix_tuple = {"col_to_fix": col_to_fix[0], "replace_col": col_okay[0]}
return fix_tuple
def convert_to_float(row, col_to_fix, replace_col):
"""
Simply converts a string coordinate to float but also fix
those fixes those NULL coordinate strings by replacing with other available coordinate
"""
try:
return float(row[col_to_fix])
except Exception as e:
return row[replace_col]
def fix_coordinates(df, cols):
"""
For coordinate columns which require fixing, we fix them here
"""
to_fix_or_not_cols = check_if_coordinates_colums_need_fixing(df=df,
cols=cols)
if to_fix_or_not_cols:
fix_col = to_fix_or_not_cols["col_to_fix"]
replace_col = to_fix_or_not_cols["replace_col"]
df[fix_col] = df.apply(convert_to_float, args=(fix_col, replace_col), axis=1)
df[fix_col] = | pd.to_numeric(df[fix_col]) | pandas.to_numeric |
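# --- illustrative sketch (editor addition, not part of the corpus rows) ---
# The row above completes with pandas.to_numeric; converting string
# coordinates to floats, as the helper above does column-wise:
import pandas as pd

_coords = pd.to_numeric(pd.Series(["-1.2833", "36.8167"]))
assert _coords.dtype.kind == "f"       # now float64
# --- end sketch ---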
import unittest
import os
from collections import defaultdict
from unittest import mock
import warnings
import pandas as pd
import numpy as np
from dataprofiler.profilers import FloatColumn
from dataprofiler.profilers.profiler_options import FloatOptions
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestFloatColumn(unittest.TestCase):
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 0)
self.assertEqual(profiler.min, None)
self.assertEqual(profiler.max, None)
self.assertEqual(profiler.sum, 0)
self.assertEqual(profiler.mean, 0)
self.assertTrue(profiler.median is np.nan)
self.assertEqual([np.nan], profiler.mode)
self.assertTrue(profiler.variance is np.nan)
self.assertTrue(profiler.skewness is np.nan)
self.assertTrue(profiler.kurtosis is np.nan)
self.assertTrue(profiler.stddev is np.nan)
self.assertIsNone(profiler.histogram_selection)
self.assertEqual(len(profiler.quantiles), 999)
self.assertIsNone(profiler.data_type_ratio)
def test_single_data_variance_case(self):
data = pd.Series([1.5]).apply(str)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 1.0)
self.assertEqual(profiler.mean, 1.5)
self.assertTrue(profiler.variance is np.nan)
data = pd.Series([2.5]).apply(str)
profiler.update(data)
self.assertEqual(profiler.match_count, 2)
self.assertEqual(profiler.mean, 2.0)
self.assertEqual(profiler.variance, 0.5)
def test_profiled_precision(self):
"""
Checks whether the precision for the profiler is correct.
:return:
"""
df_1 = pd.Series([0.4, 0.3, 0.1, 0.1, 0.1]).apply(str)
df_2 = pd.Series([0.11, 0.11, 0.12, 2.11]).apply(str)
df_3 = pd.Series([4.114, 3.161, 2.512, 2.131]).apply(str)
df_mix = pd.Series([4.1, '3.', 2.52, 2.13143]).apply(str)
float_profiler = FloatColumn("Name")
float_profiler.update(df_3)
self.assertEqual(4, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_2)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_1)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_mix)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(6, float_profiler.precision['max'])
# edge cases #
# integer with 0s on right and left side
df_ints = pd.Series(['0013245678', '123456700', '0012345600'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_ints)
self.assertEqual(6, float_profiler.precision['min'])
self.assertEqual(8, float_profiler.precision['max'])
# scientific
df_scientific = pd.Series(['1.23e-3', '2.2344', '1.244e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_scientific)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# plus
df_plus = pd.Series(['+1.3e-3', '+2.244', '+1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_plus)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# minus
df_minus = pd.Series(['-1.3234e-3', '-0.244', '-1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_minus)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# spaces around values
df_spaces = pd.Series([' -1.3234e-3 ', ' -0.244 '])
float_profiler = FloatColumn("Name")
float_profiler.update(df_spaces)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# constant precision
df_constant = pd.Series(['1.34', '+1.23e-4', '00101',
'+100.', '0.234', '-432', '.954',
'+.342', '-123e1', '23.1'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_constant)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(3, float_profiler.precision['max'])
self.assertEqual(3, float_profiler.precision['mean'])
self.assertEqual(10, float_profiler.precision['sample_size'])
self.assertEqual(0, float_profiler.precision['var'])
self.assertEqual(0, float_profiler.precision['std'])
# random precision
df_random = pd.Series(['+ 9', '-.3', '-1e-3', '3.2343', '0',
'1230', '0.33', '4.3', '302.1', '-4.322'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_random)
self.assertEqual(0, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
self.assertEqual(2.4444, float_profiler.precision['mean'])
self.assertEqual(9, float_profiler.precision['sample_size'])
self.assertEqual(2.7778, float_profiler.precision['var'])
self.assertEqual(1.6667, float_profiler.precision['std'])
# Ensure order doesn't change anything
df_random_order = pd.Series(['1230', '0.33', '4.3', '302.1', '-4.322',
'+ 9', '-.3', '-1e-3', '3.2343', '0'])
float_profiler_order = FloatColumn("Name")
float_profiler_order.update(df_random)
self.assertDictEqual(
float_profiler.precision, float_profiler_order.precision
)
# check to make sure all formats of precision are correctly predicted
samples = [
# value, min expected precision
['10.01', 4],
['.01', 1],
['0.01', 1],
['-0.01', 1],
['+0.01', 1],
[' +0.013', 2],
[' -1.3234e-3 ', 5],
[' 0012345600 ', 6],
[' 0012345600. ', 8],
[' -0012345600. ', 8],
]
for sample in samples:
df_series = pd.Series([sample[0]])
min_expected_precision = sample[1]
precision = FloatColumn._get_float_precision(df_series)
self.assertEqual(min_expected_precision, precision['min'],
msg='Errored for: {}'.format(sample[0]))
def test_profiled_min(self):
# test with multiple values
data = np.linspace(-5, 5, 11)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df[1:])
self.assertEqual(profiler.min, -4)
profiler.update(df)
self.assertEqual(profiler.min, -5)
profiler.update(pd.Series(['-4']))
self.assertEqual(profiler.min, -5)
# empty data
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.min, None)
# data with None value
df = pd.Series([2.0, 3.0, None, np.nan]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with one value
df = | pd.Series([2.0]) | pandas.Series |
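# --- illustrative sketch (editor addition, not part of the corpus rows) ---
# The row above completes with pandas.Series; the tests above routinely build
# string-typed columns via Series(...).apply(str):
import pandas as pd

_col = pd.Series([2.0, 3.5]).apply(str)
assert _col.tolist() == ["2.0", "3.5"]
# --- end sketch ---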
from os import path
from shutil import copyfile
import numpy as np
import pandas as pd
import MDAnalysis
import matplotlib.pyplot as plt
import networkx as nx
from enmspring import pairtype
from enmspring.spring import Spring
from enmspring.k_b0_util import get_df_by_filter_st, get_df_by_filter_PP, get_df_by_filter_R, get_df_by_filter_RB, get_df_by_filter_PB, get_df_by_filter_PP2_angles, get_df_same_resid, get_df_not_same_resid, FilterSB0Agent
from enmspring.hb_util import HBAgent
from enmspring.na_seq import sequences
from enmspring.networkx_display import THY_Base, CYT_Base, ADE_Base, GUA_Base, THY_Right_Base, CYT_Right_Base, ADE_Right_Base, GUA_Right_Base
hosts = ['a_tract_21mer', 'gcgc_21mer', 'tgtg_21mer',
'atat_21mer', 'ctct_21mer', 'g_tract_21mer']
class GraphAgent:
type_na = 'bdna+bdna'
n_bp = 21
cutoff = 4.7
def __init__(self, host, rootfolder, time_label='0_5000'):
self.host = host
self.rootfolder = rootfolder
self.time_label = time_label
self.host_folder = path.join(rootfolder, host)
self.na_folder = path.join(self.host_folder, self.type_na)
self.input_folder = path.join(self.na_folder, 'input')
self.spring_obj = Spring(self.rootfolder, self.host, self.type_na, self.n_bp, time_label)
self.df_all_k = self.spring_obj.read_k_b0_pairtype_df_given_cutoff(self.cutoff)
self.crd = path.join(self.input_folder, '{0}.nohydrogen.avg.crd'.format(self.type_na))
self.npt4_crd = path.join(self.input_folder, '{0}.nohydrogen.crd'.format(self.type_na))
self.u = MDAnalysis.Universe(self.crd, self.crd)
self.map, self.inverse_map, self.residues_map, self.atomid_map,\
self.atomid_map_inverse, self.atomname_map, self.strandid_map,\
self.resid_map, self.mass_map = self.build_map()
self.node_list = None
self.d_idx = None
self.n_node = None
self.adjacency_mat = None
self.degree_mat = None
self.laplacian_mat = None
self.b0_mat = None
self.w = None # Eigenvalue array
self.v = None # Eigenvector matrix, the i-th column is the i-th eigenvector
self.strand1_array = list() # 0: STRAND1, 1: STRAND2
self.strand2_array = list() #
self.strand1_benchmark = None
self.strand2_benchmark = None
self.d_seq = {'STRAND1': sequences[host]['guide'], 'STRAND2': sequences[host]['target']}
def build_node_list(self):
node_list = list()
d_idx = dict()
idx = 0
for cgname, atomname in self.atomname_map.items():
atom_type = pairtype.d_atomcgtype[atomname]
if atom_type == 'B':
node_list.append(cgname)
d_idx[cgname] = idx
idx += 1
self.node_list = node_list
self.d_idx = d_idx
self.n_node = len(self.node_list)
print(f"There are {self.n_node} nodes.")
def initialize_three_mat(self):
self.adjacency_mat = np.zeros((self.n_node, self.n_node))
self.degree_mat = np.zeros((self.n_node, self.n_node))
self.laplacian_mat = np.zeros((self.n_node, self.n_node))
self.b0_mat = np.zeros((self.n_node, self.n_node))
print('Initialize adjacency, degree and Laplacian matrices... Done.')
def build_degree_from_adjacency(self):
for idx in range(self.n_node):
self.degree_mat[idx, idx] = self.adjacency_mat[idx, :].sum()
def build_laplacian_by_adjacency_degree(self):
self.laplacian_mat = self.degree_mat + self.adjacency_mat
print("Finish the setup for Laplaican matrix.")
def get_networkx_graph(self, df, key='k'):
# key: 'k', 'b0'
node1_list = df['Atomid_i'].tolist()
node2_list = df['Atomid_j'].tolist()
weight_list = df[key].tolist()
edges_list = [(node1, node2, {'weight': weight}) for node1, node2, weight in zip(node1_list, node2_list, weight_list)]
G = nx.Graph()
G.add_nodes_from(self.get_node_list_by_id())
G.add_edges_from(edges_list)
return G
def get_node_list_by_id(self):
return [self.atomid_map[name] for name in self.node_list]
def get_networkx_d_pos(self, radius, dist_bw_base, dist_bw_strand):
d_atcg = {'A': {'STRAND1': ADE_Base, 'STRAND2': ADE_Right_Base},
'T': {'STRAND1': THY_Base, 'STRAND2': THY_Right_Base},
'C': {'STRAND1': CYT_Base, 'STRAND2': CYT_Right_Base},
'G': {'STRAND1': GUA_Base, 'STRAND2': GUA_Right_Base}
}
d_strandid_resid = self.get_d_strandid_resid()
d_pos = dict()
x_move = 0
y_move = 0
for strand_id in ['STRAND1', 'STRAND2']:
for resid in range(1, self.n_bp+1):
resname = self.d_seq[strand_id][resid-1]
nucleobase = d_atcg[resname][strand_id](radius)
nucleobase.translate_xy(x_move, y_move)
for name in d_strandid_resid[strand_id][resid]:
atomid = self.atomid_map[name]
atomname = self.atomname_map[name]
d_pos[atomid] = nucleobase.d_nodes[atomname]
if strand_id == 'STRAND1' and (resid != self.n_bp):
y_move += dist_bw_base
elif (strand_id == 'STRAND1') and (resid == self.n_bp):
y_move -= 0
else:
y_move -= dist_bw_base
x_move -= dist_bw_strand
return d_pos
def get_d_strandid_resid(self):
d_strandid_resid = self.initialize_d_strandid_resid()
for name in self.node_list:
strandid = self.strandid_map[name]
resid = self.resid_map[name]
d_strandid_resid[strandid][resid].append(name)
return d_strandid_resid
def initialize_d_strandid_resid(self):
d_strandid_resid = dict()
for strand_id in ['STRAND1', 'STRAND2']:
d_strandid_resid[strand_id] = dict()
for resid in range(1, self.n_bp+1):
d_strandid_resid[strand_id][resid] = list()
return d_strandid_resid
def get_D_by_atomname_strandid(self, sele_name, sele_strandid):
sele_resid_list = list(range(4, 19))
sele_idx_list = list()
for idx, name in enumerate(self.node_list):
if (self.atomname_map[name] == sele_name) and (self.strandid_map[name] == sele_strandid) and (self.resid_map[name] in sele_resid_list):
sele_idx_list.append(idx)
sele_D = np.zeros((self.n_node, self.n_node))
for idx in sele_idx_list:
sele_D[idx, idx] = self.degree_mat[idx, idx]
return sele_D
def get_D_by_atomname_strandid_resname(self, sele_name, sele_strandid, sele_resname):
sele_resid_list = self.get_sele_resid_list_by_resname(sele_resname, sele_strandid)
sele_idx_list = list()
for idx, name in enumerate(self.node_list):
if (self.atomname_map[name] == sele_name) and (self.strandid_map[name] == sele_strandid) and (self.resid_map[name] in sele_resid_list):
sele_idx_list.append(idx)
sele_D = np.zeros((self.n_node, self.n_node))
for idx in sele_idx_list:
sele_D[idx, idx] = self.degree_mat[idx, idx]
return sele_D
def get_sele_resid_list_by_resname(self, resname, strandid):
sele_resid_list = list()
central_resids = list(range(4, 19))
#central_resids = list(range(1, 22))
for idx, nt_name in enumerate(self.d_seq[strandid]):
resid = idx + 1
if (resid in central_resids) and (nt_name == resname):
sele_resid_list.append(resid)
return sele_resid_list
def get_A_by_atomname1_atomname2(self, atomname_i, atomname_j, sele_strandid):
sele_idx_list = list()
for resid_i in range(4, 18):
resid_j = resid_i + 1
idx_i = self.d_idx[self.map[self.get_key_by_atomname_resid_strandid(atomname_i, resid_i, sele_strandid)]]
idx_j = self.d_idx[self.map[self.get_key_by_atomname_resid_strandid(atomname_j, resid_j, sele_strandid)]]
sele_idx_list.append((idx_i, idx_j))
sele_A = np.zeros((self.n_node, self.n_node))
for idx_i, idx_j in sele_idx_list:
sele_A[idx_i, idx_j] = self.adjacency_mat[idx_i, idx_j]
i_lower = np.tril_indices(self.n_node, -1)
sele_A[i_lower] = sele_A.transpose()[i_lower] # make the matrix symmetric
return sele_A
def get_A_by_atomname1_atomname2_by_resnames(self, atomname_i, atomname_j, resname_i, resname_j, sele_strandid):
sele_idx_list = list()
resid_i_list, resid_j_list = self.get_resid_i_resid_j_list(resname_i, resname_j, sele_strandid)
for resid_i, resid_j in zip(resid_i_list, resid_j_list):
idx_i = self.d_idx[self.map[self.get_key_by_atomname_resid_strandid(atomname_i, resid_i, sele_strandid)]]
idx_j = self.d_idx[self.map[self.get_key_by_atomname_resid_strandid(atomname_j, resid_j, sele_strandid)]]
sele_idx_list.append((idx_i, idx_j))
sele_A = np.zeros((self.n_node, self.n_node))
for idx_i, idx_j in sele_idx_list:
sele_A[idx_i, idx_j] = self.adjacency_mat[idx_i, idx_j]
i_lower = np.tril_indices(self.n_node, -1)
sele_A[i_lower] = sele_A.transpose()[i_lower] # make the matrix symmetric
return sele_A
def get_resid_i_resid_j_list(self, resname_i, resname_j, sele_strandid):
seq = self.d_seq[sele_strandid]
central_resids = range(4, 19)
resid_i_list = list()
resid_j_list = list()
for resid in central_resids:
if (seq[resid-1] == resname_i) and (seq[resid] == resname_j):
resid_i_list.append(resid)
resid_j_list.append(resid+1)
return resid_i_list, resid_j_list
def get_atomidpairs_atomname1_atomname2(self, atomname_i, atomname_j, sele_strandid):
atomidpairs = list()
for resid_i in range(4, 18):
resid_j = resid_i + 1
idx_i = self.atomid_map[self.map[self.get_key_by_atomname_resid_strandid(atomname_i, resid_i, sele_strandid)]]
idx_j = self.atomid_map[self.map[self.get_key_by_atomname_resid_strandid(atomname_j, resid_j, sele_strandid)]]
atomidpairs.append((idx_i, idx_j))
return atomidpairs
def get_key_by_atomname_resid_strandid(self, atomname, resid, strandid):
return f'segid {strandid} and resid {resid} and name {atomname}'
def get_filter_by_atomname_strandid(self, sele_name, sele_strandid):
sele_resid_list = list(range(4, 19))
sele_idx_list = list()
for idx, name in enumerate(self.node_list):
if (self.atomname_map[name] == sele_name) and (self.strandid_map[name] == sele_strandid) and (self.resid_map[name] in sele_resid_list):
sele_idx_list.append(idx)
y = np.zeros(self.n_node)
y[sele_idx_list] = 1
return y / np.linalg.norm(y)
def get_filter_by_atomname_for_YR(self, sele_name, sele_resname, sele_strandid):
sele_resid_list = list(range(4, 19))
sele_idx_list = list()
for idx, name in enumerate(self.node_list):
resid = self.resid_map[name]
if resid not in sele_resid_list:
continue
strandid = self.strandid_map[name]
if strandid != sele_strandid:
continue
resname = self.d_seq[strandid][resid-1]
if resname != sele_resname:
continue
if self.atomname_map[name] == sele_name:
sele_idx_list.append(idx)
y = np.zeros(self.n_node)
y[sele_idx_list] = 1
return y / np.linalg.norm(y)
def eigen_decompose(self):
w, v = np.linalg.eig(self.laplacian_mat)
idx = w.argsort()[::-1] # sort from big to small
self.w = w[idx]
self.v = v[:, idx]
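# --- illustrative sketch (editor addition, hedged): eigen_decompose sorts
# eigenpairs from largest to smallest eigenvalue. The same pattern on a tiny
# symmetric matrix, shown in comments so the class body is left untouched:
#   M = np.array([[2.0, 1.0], [1.0, 2.0]])
#   w, v = np.linalg.eig(M)
#   order = w.argsort()[::-1]        # big -> small, as above
#   w, v = w[order], v[:, order]     # w becomes [3.0, 1.0]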
def get_eigenvalue_by_id(self, sele_id):
return self.w[sele_id-1]
def get_eigenvector_by_id(self, sele_id):
return self.v[:,sele_id-1]
def get_qtAq(self, sele_id):
eigvector_sele = self.get_eigenvector_by_id(sele_id)
return np.dot(eigvector_sele.T, np.dot(self.adjacency_mat, eigvector_sele))
def get_qtDq(self, sele_id):
eigvector_sele = self.get_eigenvector_by_id(sele_id)
return np.dot(eigvector_sele.T, np.dot(self.degree_mat, eigvector_sele))
def get_qtMq(self, sele_id, M):
### M is customized matrix
eigvector_sele = self.get_eigenvector_by_id(sele_id)
return np.dot(eigvector_sele.T, np.dot(M, eigvector_sele))
def vmd_show_crd(self):
print(f'vmd -cor {self.npt4_crd}')
def copy_nohydrogen_crd(self):
allsys_root = '/home/yizaochen/codes/dna_rna/all_systems'
srt = path.join(allsys_root, self.host, self.type_na, 'input', 'heavyatoms', f'{self.type_na}.nohydrogen.crd')
dst = self.npt4_crd
copyfile(srt, dst)
print(f'cp {srt} {dst}')
def decide_eigenvector_strand(self, eigv_id):
eigv = self.get_eigenvector_by_id(eigv_id)
dot_product = np.dot(eigv, self.strand1_benchmark)
if np.isclose(dot_product, 0.):
return True #'STRAND2'
else:
return False #'STRAND1'
def set_strand_array(self):
for eigv_id in range(1, self.n_node+1):
if self.decide_eigenvector_strand(eigv_id):
self.strand2_array.append(eigv_id)
else:
self.strand1_array.append(eigv_id)
print(f'Total number of nodes: {self.n_node}')
print(f'There are {len(self.strand1_array)} eigenvectors belonging to STRAND1.')
print(f'There are {len(self.strand2_array)} eigenvectors belonging to STRAND2.')
print(f'Sum of two strands: {len(self.strand1_array)+len(self.strand2_array)}')
def get_lambda_by_strand(self, strandid):
if strandid == 'STRAND1':
return [self.get_eigenvalue_by_id(eigv_id) for eigv_id in self.strand1_array]
else:
return [self.get_eigenvalue_by_id(eigv_id) for eigv_id in self.strand2_array]
def get_eigvector_by_strand(self, strandid, sele_id):
if strandid == 'STRAND1':
real_eigv_id = self.strand1_array[sele_id]
else:
real_eigv_id = self.strand2_array[sele_id]
return self.get_eigenvector_by_id(real_eigv_id), self.get_eigenvalue_by_id(real_eigv_id)
def set_adjacency_by_df(self, df_sele):
idx_i_list = self.__get_idx_list(df_sele['Atomid_i'])
idx_j_list = self.__get_idx_list(df_sele['Atomid_j'])
k_list = df_sele['k'].tolist()
for idx_i, idx_j, k in zip(idx_i_list, idx_j_list, k_list):
self.adjacency_mat[idx_i, idx_j] = k
def set_b0_mat_by_df(self, df_sele):
idx_i_list = self.__get_idx_list(df_sele['Atomid_i'])
idx_j_list = self.__get_idx_list(df_sele['Atomid_j'])
b0_list = df_sele['b0'].tolist()
for idx_i, idx_j, b0 in zip(idx_i_list, idx_j_list, b0_list):
self.b0_mat[idx_i, idx_j] = b0
i_lower = np.tril_indices(self.n_node, -1)
self.b0_mat[i_lower] = self.b0_mat.transpose()[i_lower] # make the matrix symmetric
def set_adjacency_by_d(self, d_sele):
idx_i_list = self.__get_idx_list(d_sele['Atomid_i'])
idx_j_list = self.__get_idx_list(d_sele['Atomid_j'])
k_list = d_sele['k']
for idx_i, idx_j, k in zip(idx_i_list, idx_j_list, k_list):
self.adjacency_mat[idx_i, idx_j] = k
def make_adjacency_symmetry(self):
i_lower = np.tril_indices(self.n_node, -1)
self.adjacency_mat[i_lower] = self.adjacency_mat.transpose()[i_lower] # make the matrix symmetric
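# --- illustrative sketch (editor addition, hedged): the tril_indices idiom
# above mirrors the upper triangle into the lower one, e.g.:
#   A = np.array([[0.0, 5.0], [0.0, 0.0]])
#   i_lower = np.tril_indices(2, -1)
#   A[i_lower] = A.transpose()[i_lower]   # A[1, 0] is now 5.0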
def write_show_nodes_tcl(self, tcl_out, colorid=0, vdw_radius=1.0):
serials_str = self.__get_serial_nodes()
f = open(tcl_out, 'w')
f.write('display resize 362 954\n\n')
f.write('mol color ColorID 6\n')
f.write('mol representation Lines 3.000\n')
f.write('mol selection all\n')
f.write('mol material Opaque\n')
f.write('mol addrep 0\n')
f.write(f'mol color ColorID {colorid}\n')
f.write(f'mol representation VDW {vdw_radius:.3f} 12.000\n')
f.write(f'mol selection serial {serials_str}\n')
f.write('mol material Opaque\n')
f.write('mol addrep 0\n')
f.write(f'mol color ColorID 7\n')
f.write(f'mol representation VDW 0.300 12.000\n')
f.write(f'mol selection serial 6 7 8 9\n')
f.write('mol material Opaque\n')
f.write('mol addrep 0\n')
f.close()
print(f'Write tcl to {tcl_out}')
print(f'source {tcl_out}')
def process_lines_for_edges_tcl(self, lines, df_sele, radius=0.05):
u_npt4 = MDAnalysis.Universe(self.npt4_crd, self.npt4_crd)
for atomid1, atomid2 in zip(df_sele['Atomid_i'], df_sele['Atomid_j']):
line = self.__get_draw_edge_line(u_npt4.atoms.positions, atomid1-1, atomid2-1, radius)
lines.append(line)
return lines
def write_lines_to_tcl_out(self, lines, tcl_out):
f = open(tcl_out, 'w')
for line in lines:
f.write(line)
f.close()
print(f'Write tcl to {tcl_out}')
print(f'source {tcl_out}')
def __get_idx_list(self, df_column):
cgname_list = [self.atomid_map_inverse[atomid] for atomid in df_column]
return [self.d_idx[cgname] for cgname in cgname_list]
def __get_serial_nodes(self):
serials_list = [str(self.atomid_map[cgname]) for cgname in self.d_idx.keys()]
return ' '.join(serials_list)
def __get_draw_edge_line(self, positions, atomid1, atomid2, radius):
str_0 = 'graphics 0 cylinder {'
str_1 = f'{positions[atomid1,0]:.3f} {positions[atomid1,1]:.3f} {positions[atomid1,2]:.3f}'
str_2 = '} {'
str_3 = f'{positions[atomid2,0]:.3f} {positions[atomid2,1]:.3f} {positions[atomid2,2]:.3f}'
str_4 = '} '
str_5 = f'radius {radius:.2f}\n'
return str_0 + str_1 + str_2 + str_3 + str_4 + str_5
def build_map(self):
d1 = dict() # key: selction, value: cgname
d2 = dict() # key: cgname, value: selection
d3 = dict()
d4 = dict() # key: cgname, value: atomid
d5 = dict() # key: atomid, value: cgname
d6 = dict() # key: cgname, value: atomname
d7 = dict() # key: cgname, value: strand_id
d8 = dict() # key: cgname, value: resid
d9 = dict() # key: cgname, value: mass
atomid = 1
segid1 = self.u.select_atoms("segid STRAND1")
d3['STRAND1'] = dict()
for i, atom in enumerate(segid1):
cgname = 'A{0}'.format(i+1)
selection = self.__get_selection(atom)
d1[selection] = cgname
d2[cgname] = selection
if atom.resid not in d3['STRAND1']:
d3['STRAND1'][atom.resid] = list()
d3['STRAND1'][atom.resid].append(cgname)
d4[cgname] = atomid
d5[atomid] = cgname
d6[cgname] = atom.name
d7[cgname] = 'STRAND1'
d8[cgname] = atom.resid
d9[cgname] = atom.mass
atomid += 1
segid2 = self.u.select_atoms("segid STRAND2")
d3['STRAND2'] = dict()
for i, atom in enumerate(segid2):
cgname = 'B{0}'.format(i+1)
selection = self.__get_selection(atom)
d1[selection] = cgname
d2[cgname] = selection
if atom.resid not in d3['STRAND2']:
d3['STRAND2'][atom.resid] = list()
d3['STRAND2'][atom.resid].append(cgname)
d4[cgname] = atomid
d5[atomid] = cgname
d6[cgname] = atom.name
d7[cgname] = 'STRAND2'
d8[cgname] = atom.resid
d9[cgname] = atom.mass
atomid += 1
return d1, d2, d3, d4, d5, d6, d7, d8, d9
def __get_selection(self, atom):
return 'segid {0} and resid {1} and name {2}'.format(atom.segid, atom.resid, atom.name)
class Stack(GraphAgent):
def __init__(self, host, rootfolder, time_label='0_5000'):
super().__init__(host, rootfolder, time_label)
self.df_st = self.read_df_st()
def pre_process(self):
self.build_node_list()
self.initialize_three_mat()
self.build_adjacency_from_df_st()
self.build_degree_from_adjacency()
self.build_laplacian_by_adjacency_degree()
self.eigen_decompose()
self.set_benchmark_array()
self.set_strand_array()
self.set_b0_mat_by_df(self.df_st)
def build_adjacency_from_df_st(self):
self.set_adjacency_by_df(self.df_st)
self.make_adjacency_symmetry()
def set_benchmark_array(self):
idx_start_strand2 = self.d_idx['B6']
strand1 = np.zeros(self.n_node)
strand2 = np.zeros(self.n_node)
strand1[:idx_start_strand2] = 1.
strand2[idx_start_strand2:] = 1.
self.strand1_benchmark = strand1
self.strand2_benchmark = strand2
def write_show_base_edges_tcl(self, tcl_out, radius=0.05):
lines = ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
lines = self.process_lines_for_edges_tcl(lines, self.df_st, radius=radius)
self.write_lines_to_tcl_out(lines, tcl_out)
def get_df_qTAq_for_vmd_draw(self, eigv_id, strandid):
df = self.df_st
columns_qTAq = ['Strand_i', 'Resid_i', 'Atomname_i', 'Strand_j', 'Resid_j', 'Atomname_j']
d_qTAq = {col_name: df[col_name].tolist() for col_name in columns_qTAq}
d_qTAq['qTAq'] = np.zeros(df.shape[0])
q = self.get_eigvector_by_strand(strandid, eigv_id)[0]
for idx, atomids in enumerate(zip(df['Atomid_i'], df['Atomid_j'])):
atomid_i , atomid_j = atomids
A = self.get_sele_A_by_idx(atomid_i, atomid_j)
d_qTAq['qTAq'][idx] = np.dot(q.T, np.dot(A, q))
df_result = pd.DataFrame(d_qTAq)
columns_qTAq.append('qTAq')
return df_result[columns_qTAq]
def get_sele_A_by_idx(self, atomid_i, atomid_j):
sele_A = np.zeros((self.n_node, self.n_node))
idx_i = self.d_idx[self.atomid_map_inverse[atomid_i]]
idx_j = self.d_idx[self.atomid_map_inverse[atomid_j]]
sele_A[idx_i, idx_j] = self.adjacency_mat[idx_i, idx_j]
i_lower = np.tril_indices(self.n_node, -1)
sele_A[i_lower] = sele_A.transpose()[i_lower]
return sele_A
def read_df_st(self):
criteria = 1e-3
df1 = get_df_by_filter_st(self.df_all_k, 'st')
mask = (df1['k'] > criteria)
print("Read Dataframe of stacking: df_st")
return df1[mask]
class StackHB(Stack):
def __init__(self, host, rootfolder, time_label='0_5000'):
super().__init__(host, rootfolder, time_label)
self.hb_agent = HBAgent(host, rootfolder, self.n_bp)
def build_adjacency_from_df_st_df_hb(self):
self.set_adjacency_by_df(self.df_st)
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
self.set_adjacency_by_d(d_hb_new)
self.make_adjacency_symmetry()
def write_show_base_hb_edges_tcl(self, tcl_out, radius=0.05):
lines = ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
lines = self.process_lines_for_edges_tcl(lines, self.df_st, radius=radius)
lines += ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
lines = self.process_lines_for_edges_tcl(lines, d_hb_new, radius=radius)
self.write_lines_to_tcl_out(lines, tcl_out)
class onlyHB(StackHB):
def pre_process(self):
self.build_node_list()
self.initialize_three_mat()
self.build_adjacency_from_df_hb()
self.build_degree_from_adjacency()
self.build_laplacian_by_adjacency_degree()
self.eigen_decompose()
def build_adjacency_from_df_hb(self):
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
self.set_adjacency_by_d(d_hb_new)
self.make_adjacency_symmetry()
def write_show_base_hb_edges_tcl(self, tcl_out, radius=0.05):
lines = ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
lines = self.process_lines_for_edges_tcl(lines, d_hb_new, radius=radius)
self.write_lines_to_tcl_out(lines, tcl_out)
def get_df_hb_new(self):
columns = ['Strand_i', 'Resid_i', 'Atomname_i', 'Atomid_i', 'Strand_j', 'Resid_j', 'Atomname_j', 'Atomid_j', 'k']
d_result = dict()
d_hb_new = self.hb_agent.get_d_hb_contain_atomid_k_all_basepair()
cgname_i_list = [self.atomid_map_inverse[atomid_i] for atomid_i in d_hb_new['Atomid_i']]
cgname_j_list = [self.atomid_map_inverse[atomid_j] for atomid_j in d_hb_new['Atomid_j']]
d_result['Strand_i'] = [self.strandid_map[cgname_i] for cgname_i in cgname_i_list]
d_result['Strand_j'] = [self.strandid_map[cgname_j] for cgname_j in cgname_j_list]
d_result['Resid_i'] = [self.resid_map[cgname_i] for cgname_i in cgname_i_list]
d_result['Resid_j'] = [self.resid_map[cgname_j] for cgname_j in cgname_j_list]
d_result['Atomname_i'] = [self.atomname_map[cgname_i] for cgname_i in cgname_i_list]
d_result['Atomname_j'] = [self.atomname_map[cgname_j] for cgname_j in cgname_j_list]
d_result['Atomid_i'] = d_hb_new['Atomid_i']
d_result['Atomid_j'] = d_hb_new['Atomid_j']
d_result['k'] = d_hb_new['k']
df_hb_new = pd.DataFrame(d_result)
criteria = 1e-3
mask = (df_hb_new['k'] > criteria)
df_hb_new = df_hb_new[mask]
return df_hb_new[columns]
def get_df_qTAq_for_vmd_draw(self, eigv_id):
df = self.get_df_hb_new()
columns_qTAq = ['Strand_i', 'Resid_i', 'Atomname_i', 'Strand_j', 'Resid_j', 'Atomname_j']
d_qTAq = {col_name: df[col_name].tolist() for col_name in columns_qTAq}
d_qTAq['qTAq'] = np.zeros(df.shape[0])
q = self.get_eigenvector_by_id(eigv_id)
for idx, atomids in enumerate(zip(df['Atomid_i'], df['Atomid_j'])):
atomid_i , atomid_j = atomids
A = self.get_sele_A_by_idx(atomid_i, atomid_j)
d_qTAq['qTAq'][idx] = np.dot(q.T, np.dot(A, q))
df_result = pd.DataFrame(d_qTAq)
columns_qTAq.append('qTAq')
return df_result[columns_qTAq]
class BackboneRibose(GraphAgent):
def pre_process(self):
self.build_node_list()
self.initialize_three_mat()
self.build_adjacency_from_pp_r()
self.build_degree_from_adjacency()
self.build_laplacian_by_adjacency_degree()
self.eigen_decompose()
self.set_benchmark_array()
self.set_strand_array()
def build_node_list(self):
node_list = list()
d_idx = dict()
idx = 0
for cgname, atomname in self.atomname_map.items():
atom_type = pairtype.d_atomcgtype[atomname]
if (atom_type == 'P') or (atom_type == 'S') or (atom_type == 'B'):
node_list.append(cgname)
d_idx[cgname] = idx
idx += 1
self.node_list = node_list
self.d_idx = d_idx
self.n_node = len(self.node_list)
print(f"There are {self.n_node} nodes.")
def get_df_backbone_ribose(self):
df_pp2_filter_angle = get_df_by_filter_PP2_angles(get_df_by_filter_PP(self.df_all_k, 'PP2'))
df_pp3 = get_df_by_filter_PP(self.df_all_k, 'PP3')
df_pp_lst = [df_pp2_filter_angle, df_pp3]
df_rb_lst = [get_df_by_filter_RB(self.df_all_k, subcategory) for subcategory in ['RB2', 'RB3']]
df_pb_lst = [get_df_by_filter_PB(self.df_all_k, subcategory) for subcategory in ['PB']]
df_pp_r_rb = pd.concat(df_pp_lst+df_rb_lst+df_pb_lst)
#df_pp_r_rb = pd.concat(df_pp_lst)
criteria = 1e-1
mask = (df_pp_r_rb['k'] > criteria)
return df_pp_r_rb[mask]
def build_adjacency_from_pp_r(self):
df_sele = self.get_df_backbone_ribose()
self.set_adjacency_by_df(df_sele)
self.make_adjacency_symmetry()
self.set_b0_mat_by_df(df_sele)
def get_sele_A_by_idx(self, atomid_i, atomid_j):
sele_A = np.zeros((self.n_node, self.n_node))
idx_i = self.d_idx[self.atomid_map_inverse[atomid_i]]
idx_j = self.d_idx[self.atomid_map_inverse[atomid_j]]
sele_A[idx_i, idx_j] = self.adjacency_mat[idx_i, idx_j]
i_lower = np.tril_indices(self.n_node, -1)
sele_A[i_lower] = sele_A.transpose()[i_lower]
return sele_A
def get_df_qTAq_for_vmd_draw(self, eigv_id, strandid):
df = self.get_df_backbone_ribose()
columns_qTAq = ['Strand_i', 'Resid_i', 'Atomname_i', 'Strand_j', 'Resid_j', 'Atomname_j']
d_qTAq = {col_name: df[col_name].tolist() for col_name in columns_qTAq}
d_qTAq['qTAq'] = np.zeros(df.shape[0])
q = self.get_eigvector_by_strand(strandid, eigv_id)[0]
for idx, atomids in enumerate(zip(df['Atomid_i'], df['Atomid_j'])):
atomid_i , atomid_j = atomids
A = self.get_sele_A_by_idx(atomid_i, atomid_j)
d_qTAq['qTAq'][idx] = np.dot(q.T, np.dot(A, q))
df_result = pd.DataFrame(d_qTAq)
columns_qTAq.append('qTAq')
return df_result[columns_qTAq]
def set_benchmark_array(self):
idx_start_strand2 = self.d_idx['B1']
strand1 = np.zeros(self.n_node)
strand2 = np.zeros(self.n_node)
strand1[:idx_start_strand2] = 1.
strand2[idx_start_strand2:] = 1.
self.strand1_benchmark = strand1
self.strand2_benchmark = strand2
def write_show_backbone_edges_tcl(self, tcl_out, radius=0.05):
lines = ['graphics 0 color 1\n', 'graphics 0 material AOShiny\n']
for subcategory in ['PP0', 'PP1', 'PP2', 'PP3']:
df_sele = get_df_by_filter_PP(self.df_all_k, subcategory)
lines = self.process_lines_for_edges_tcl(lines, df_sele, radius=radius)
for subcategory in ['R0', 'R1']:
df_sele = get_df_by_filter_R(self.df_all_k, subcategory)
lines = self.process_lines_for_edges_tcl(lines, df_sele, radius=radius)
self.write_lines_to_tcl_out(lines, tcl_out)
class BB1(GraphAgent):
def pre_process(self):
self.build_node_list()
self.initialize_three_mat()
self.build_adjacency_from_pp_r()
self.build_degree_from_adjacency()
self.build_laplacian_by_adjacency_degree()
self.eigen_decompose()
self.set_benchmark_array()
self.set_strand_array()
def build_node_list(self):
node_list = list()
d_idx = dict()
idx = 0
for cgname, atomname in self.atomname_map.items():
atom_type = pairtype.d_atomcgtype[atomname]
if (atom_type == 'P') or (atom_type == 'S') or (atom_type == 'B'):
node_list.append(cgname)
d_idx[cgname] = idx
idx += 1
self.node_list = node_list
self.d_idx = d_idx
self.n_node = len(self.node_list)
print(f"There are {self.n_node} nodes.")
def get_df_backbone_ribose(self):
df_pp2_filter_angle = get_df_by_filter_PP2_angles(get_df_by_filter_PP(self.df_all_k, 'PP2'))
df_pp3 = get_df_by_filter_PP(self.df_all_k, 'PP3')
df_pp_lst = [df_pp2_filter_angle, df_pp3]
df_rb_lst = [get_df_by_filter_RB(self.df_all_k, subcategory) for subcategory in ['RB2', 'RB3']]
df_pb_lst = [get_df_by_filter_PB(self.df_all_k, subcategory) for subcategory in ['PB']]
df_pp_r_rb = | pd.concat(df_pp_lst+df_rb_lst+df_pb_lst) | pandas.concat |
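# --- illustrative sketch (editor addition, not part of the corpus rows) ---
# The row above completes with pandas.concat; stacking several per-category
# DataFrames the same way:
import pandas as pd

_parts = [pd.DataFrame({"k": [1.0]}), pd.DataFrame({"k": [2.0, 3.0]})]
assert len(pd.concat(_parts)) == 3
# --- end sketch ---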
import io
import json
import os.path
from typing import Any, BinaryIO, Callable, Dict, List, Optional
import numpy as np
import pandas as pd
import zstandard
from databento.common.data import BIN_COLUMNS, BIN_RECORD_MAP, DERIV_SCHEMAS
from databento.common.enums import Compression, Encoding, Schema
class Bento:
"""The abstract base class for all Bento I/O classes."""
def __init__(
self,
schema: Optional[Schema],
encoding: Optional[Encoding],
compression: Optional[Compression],
):
# Set compression
self._compression = compression or self._infer_compression()
# Set encoding
self._encoding = encoding or self._infer_encoding()
# Set schema
self._schema = schema or self._infer_schema()
self._struct_fmt = np.dtype(BIN_RECORD_MAP[self._schema])
self._struct_size = self._struct_fmt.itemsize
@property
def schema(self) -> Schema:
"""
Return the output schema.
Returns
-------
Schema
"""
return self._schema
@property
def encoding(self) -> Encoding:
"""
Return the output encoding.
Returns
-------
Encoding
"""
return self._encoding
@property
def compression(self) -> Compression:
"""
Return the output compression.
Returns
-------
Compression
"""
return self._compression
@property
def struct_fmt(self) -> np.dtype:
"""
Return the binary struct format for the schema.
Returns
-------
np.dtype
"""
return self._struct_fmt
@property
def struct_size(self) -> int:
"""
Return the schema's binary struct size in bytes.
Returns
-------
int
"""
return self._struct_size
@property
def nbytes(self) -> int:
raise NotImplementedError() # pragma: no cover
@property
def raw(self) -> bytes:
"""
Return the raw data from the I/O stream.
Returns
-------
bytes
"""
raise NotImplementedError() # pragma: no cover
def reader(self, decompress: bool = False) -> BinaryIO:
"""
Return an I/O reader for the data.
Parameters
----------
decompress : bool
If data should be decompressed (if compressed).
Returns
-------
BinaryIO
"""
raise NotImplementedError() # pragma: no cover
def writer(self) -> BinaryIO:
"""
Return a raw I/O writer for the data.
Returns
-------
BinaryIO
"""
raise NotImplementedError() # pragma: no cover
def to_file(self, path: str) -> "FileBento":
"""
Write the data to a file at the given path.
Parameters
----------
path : str
The path to write to.
Returns
-------
FileBento
"""
with open(path, mode="wb") as f:
f.write(self.raw)
return FileBento(
path=path,
schema=self._schema,
encoding=self._encoding,
compression=self._compression,
)
def to_list(self) -> List[Any]:
"""
Return the data as a list of records.
- BIN encoding will return a list of `np.void` mixed dtypes.
- CSV encoding will return a list of `str`.
- JSON encoding will return a list of `Dict[str, Any]`.
Returns
-------
List[Any]
"""
if self._encoding == Encoding.BIN:
return self._prepare_list_bin()
elif self._encoding == Encoding.CSV:
return self._prepare_list_csv()
elif self._encoding == Encoding.JSON:
return self._prepare_list_json()
else: # pragma: no cover (design-time error)
raise ValueError(f"invalid encoding, was {self._encoding.value}")
def to_df(self, pretty_ts: bool = False, pretty_px: bool = False) -> pd.DataFrame:
"""
Return the data as a pd.DataFrame.
Parameters
----------
pretty_ts : bool, default False
If the type of any timestamp columns should be converted from UNIX
nanosecond `int` to `pd.Timestamp` (UTC).
pretty_px : bool, default False
If the type of any price columns should be converted from `int` to
`float` at the correct scale (using the fixed precision scalar 1e-9).
Returns
-------
pd.DataFrame
"""
if self._encoding == Encoding.BIN:
df: pd.DataFrame = self._prepare_df_bin()
elif self._encoding == Encoding.CSV:
df = self._prepare_df_csv()
elif self._encoding == Encoding.JSON:
df = self._prepare_df_json()
else: # pragma: no cover (design-time error)
raise ValueError(f"invalid encoding, was {self._encoding.value}")
if pretty_ts:
df.index = | pd.to_datetime(df.index, utc=True) | pandas.to_datetime |
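# --- illustrative sketch (editor addition, not part of the corpus rows) ---
# The row above completes with pandas.to_datetime(..., utc=True); converting
# UNIX-nanosecond integers to a tz-aware index:
import pandas as pd

_ts = pd.to_datetime(pd.Index([1609459200000000000]), utc=True)
assert str(_ts.tz) == "UTC"            # 2021-01-01T00:00:00Z
# --- end sketch ---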
# -*- coding: UTF-8 -*-
# **********************************************************************************#
# File: PMS entity file.
# Author: Myron
# **********************************************************************************#
import pandas as pd
from copy import copy
from .. base import *
from .. pms_broker import PMSBroker
from ... import logger
from ... core.clock import clock
from ... core.schema import *
from ... core.enum import SecuritiesType
from lib.gateway.subscriber import MarketQuote
from ... trade.dividend import Dividend
from ... trade.order import PMSOrder, OrderState, OrderStateMessage
from ... utils.date_utils import get_next_date, get_latest_trading_date
from ... utils.dict_utils import DefaultDict, dict_map, CompositeDict
class SecurityPMSAgent(object):
"""
Security PMS agent
"""
pms_broker = PMSBroker()
portfolio_info = DefaultDict(PortfolioSchema(portfolio_type='parent_portfolio'))
sub_portfolio_info = DefaultDict(PortfolioSchema(portfolio_type='sub_portfolio'))
position_info = DefaultDict(PositionSchema(date=clock.current_date.strftime('%Y%m%d')))
order_info = DefaultDict(OrderSchema(date=clock.current_date.strftime('%Y%m%d')))
trade_info = DefaultDict(TradeSchema(date=clock.current_date.strftime('%Y%m%d')))
dividends = None
current_exchangeable = []
def __new__(cls, **kwargs):
"""
Args:
portfolio_info(dict): all portfolios | {portfolio_id: PortfolioSchema}
sub_portfolio_info(dict): all sub portfolios | {sub_portfolio_id: PortfolioSchema}
position_info(dict): all pms equities | {sub_portfolio_id: PositionSchema}
order_info(dict): all pms orders | {sub_portfolio_id: OrderSchema}
trade_info(dict): all pms trades | {sub_portfolio_id: TradeSchema}
"""
if not hasattr(cls, '_instance'):
cls._instance = super(SecurityPMSAgent, cls).__new__(cls)
return cls._instance
def prepare(self):
"""
Prepare when service is loading.
"""
self.current_exchangeable = get_current_exchangeable_securities()
def pre_trading_day(self, with_dividend=True, force_date=None):
"""
Pre-market processing: load collections, synchronize portfolios, and dump info to the database
Args:
with_dividend(boolean): whether to process dividends
force_date(datetime.datetime): specify a base date
"""
date = force_date or clock.current_date
message = 'Begin pre trading day: '+date.strftime('%Y-%m-%d')
logger.info('[SECURITY] [PRE TRADING DAY]'+message)
portfolio_info = query_portfolio_info_by_(SecuritiesType.SECURITY)
portfolio_ids = portfolio_info.keys()
delete_redis_([SchemaType.position, SchemaType.order], portfolio_ids)
position_info = query_by_ids_('mongodb', SchemaType.position, date, portfolio_ids)
self.position_info.update(position_info)
invalid_portfolios = [key for key in self.position_info if key not in portfolio_info]
if invalid_portfolios:
invalid_msg = 'Position not loaded'+', '.join(invalid_portfolios)
logger.info('[SECURITY] [PRE TRADING DAY]'+invalid_msg)
self._load_dividend([date])
self._execute_dividend(date) if with_dividend else None
dump_to_('all', SchemaType.position, self.position_info) if self.position_info else None
order_info = query_by_ids_('mongodb', SchemaType.order, date, portfolio_ids)
dump_to_('redis', SchemaType.order, order_info) if order_info else None
self.current_exchangeable = get_current_exchangeable_securities()
self.clear()
end_msg = 'End pre trading day: '+date.strftime('%Y-%m-%d')
logger.info('[SECURITY] [PRE TRADING DAY]'+end_msg)
def accept_orders(self, orders):
"""
Interface to accept orders from outside. Things to be done: 1) pre-check the orders;
2) dump the orders to the database;
3) send valid orders to the broker for transaction;
Args:
orders(list): order requests
"""
if isinstance(orders, dict):
orders = [orders]
pms_orders = \
[self._order_check(PMSOrder.from_request(order)) for order in orders]
update_('redis', SchemaType.order, pms_orders)
active_pms_orders = [order for order in pms_orders if order.state in OrderState.ACTIVE]
self.pms_broker.security_broker.accept_orders(active_pms_orders)
def post_trading_day(self, with_dividend=True, force_date=None):
"""
Post-market processing
Args:
with_dividend(Boolean): whether to execute dividends
force_date(datetime.datetime): run post_trading_day for a force_date other than the current date
"""
date = force_date or clock.current_date
msg = 'Begin post trading day: '+date.strftime('%Y-%m-%d')
logger.info('[SECURITY] [POST TRADING DAY]'+msg)
portfolio_info = query_portfolio_info_by_(SecuritiesType.SECURITY)
position_info = query_by_ids_('mongodb', SchemaType.position,
date=date, portfolio_ids=portfolio_info.keys())
benchmark_dict = {e.portfolio_id: e.benchmark for e in portfolio_info.itervalues()}
position_info = self.evaluate(position_info, benchmark_dict, force_date)
self.position_info.update(position_info)
self._load_dividend([date])
if with_dividend:
self._record_dividend(date)
if self.position_info:
dump_to_('all', SchemaType.position, self.position_info)
new_position_info = self._synchronize_position(date)
if new_position_info:
dump_to_('mongodb', SchemaType.position, new_position_info)
self.clear()
msg = 'End post trading day: '+date.strftime('%Y-%m-%d')
logger.info('[SECURITY] [POST TRADING DAY]'+msg)
def evaluate(self, position_info=None, benchmark_dict=None, force_evaluate_date=None):
"""
Compute position market value, floating P&L, and the user's equity for the day
Args:
position_info(dict): user position data
force_evaluate_date(datetime.datetime): whether to force valuation as of this date
benchmark_dict(dict): the benchmark corresponding to each portfolio
Returns:
position_info(dict): the updated position data
"""
if not position_info:
return position_info
benchmark_change_percent = None
if force_evaluate_date:
last_price_info = load_equity_market_data(force_evaluate_date)
# check out api.
latest_trading_date = get_latest_trading_date(force_evaluate_date)
benchmark_change_percent = load_change_percent_for_benchmark(latest_trading_date)
else:
market_quote = MarketQuote.get_instance()
all_stocks = set(SecurityPMSAgent().current_exchangeable)
if benchmark_dict:
all_stocks |= set(benchmark_dict.values())
last_price_info = market_quote.get_price_info(universes=list(all_stocks))
# logger.info('*'*30)
for portfolio_id, position_schema in position_info.iteritems():
total_values = 0
for symbol, position in position_schema.positions.iteritems():
price_info = last_price_info.get(symbol)
# logger.info("Price message: {}, {}".format(symbol, price_info))
if position and price_info:
position.evaluate(price_info['closePrice'])
position_schema.positions[symbol] = position
# if price info is not available, the total value would add the latest evaluated value.
total_values += position.value
# logger.info("total_values: {}".format(total_values))
# logger.info("cash: {}".format(position_schema.cash))
# logger.info("portfolio_value: {}".format(position_schema.portfolio_value))
position_schema.portfolio_value = position_schema.cash + total_values
position_schema.daily_return = position_schema.portfolio_value / position_schema.pre_portfolio_value - 1
if benchmark_dict:
benchmark = benchmark_dict.get(portfolio_id)
if not benchmark:
position_info[portfolio_id] = position_schema
continue
if force_evaluate_date:
position_schema.benchmark_return = benchmark_change_percent.at[benchmark]
else:
benchmark_price_item = last_price_info.get(benchmark)
pre_benchmark_price = self.pms_broker.get_pre_close_price_of_(benchmark)
if benchmark_price_item and pre_benchmark_price is not None:
position_schema.benchmark_return = calc_return(pre_benchmark_price,
benchmark_price_item['closePrice'])
position_info[portfolio_id] = position_schema
# logger.info('*'*30)
return position_info
def clear(self):
"""
Clear info
"""
self.portfolio_info.clear()
self.sub_portfolio_info.clear()
self.position_info.clear()
self.order_info.clear()
self.trade_info.clear()
def _synchronize_position(self, date):
"""
Synchronize yesterday's settled position information
"""
current_date = date or clock.current_date
next_date = get_next_date(current_date).strftime('%Y%m%d')
def _update_date(key, value):
"""
Update the date of node value
"""
value.date = next_date
for position in value.positions.itervalues():
position.available_amount = position.amount
value.pre_portfolio_value = value.portfolio_value
value.benchmark_return = 0.
value.daily_return = 0.
return key, value
return dict_map(_update_date, copy(self.position_info))
def _load_dividend(self, trading_days=None):
"""
Load dividend data and classify it
Args:
trading_days(list of datetime): list of trading days
Returns:
"""
trading_days = trading_days or [clock.current_date]
raw_data = load_dividend_data(trading_days)
normalize_column = ['per_cash_div_af_tax', 'shares_bf_div', 'shares_af_div']
raw_data[normalize_column] = raw_data[normalize_column].fillna(0).applymap(float)
raw_data['share_ratio'] = raw_data.shares_af_div / raw_data.shares_bf_div
result = CompositeDict()
records = raw_data.groupby('record_date').groups
cash_divs = raw_data.groupby('pay_cash_date').groups
ex_divs = raw_data.groupby('ex_div_date').groups
for date, group in records.iteritems():
date = pd.to_datetime(date)
temp_data = raw_data.iloc[group][['security_id', 'pay_cash_date', 'ex_div_date']].as_matrix().tolist()
dividend_items = dict()
for dividend_item in temp_data:
execute_dates = filter(lambda x: x, dividend_item[1:])
if not execute_dates:
continue
execute_date = max(execute_dates)
key = dividend_item[0]
dividend_items[key] = execute_date
result['div_record'][date.strftime('%Y%m%d')] = dividend_items
result['cash_div'] = \
{ | pd.to_datetime(date) | pandas.to_datetime |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import re
from copy import copy as copy_obj
from numbers import Integral
from typing import Type, Sequence
import numpy as np
import pandas as pd
from pandas._libs import lib
from pandas.api.indexers import check_array_indexer
from pandas.api.types import (
pandas_dtype,
is_scalar,
is_array_like,
is_string_dtype,
is_list_like,
)
from pandas.api.extensions import (
ExtensionArray,
ExtensionDtype,
register_extension_dtype,
)
from pandas.arrays import StringArray as StringArrayBase
from pandas.core import ops
from pandas.core.algorithms import take
from pandas.compat import set_function_name
try:
from pandas._libs.arrays import NDArrayBacked
except ImportError:
NDArrayBacked = None
try:
import pyarrow as pa
pa_null = pa.NULL
except ImportError: # pragma: no cover
pa = None
pa_null = None
from ..config import options
from ..core import is_kernel_mode
from ..lib.version import parse as parse_version
from ..utils import tokenize
_use_bool_any_all = parse_version(pd.__version__) >= parse_version("1.3.0")
class ArrowDtype(ExtensionDtype):
@property
def arrow_type(self): # pragma: no cover
raise NotImplementedError
def __from_arrow__(self, array):
return self.construct_array_type()(array)
@register_extension_dtype
class ArrowStringDtype(ArrowDtype):
"""
Extension dtype for arrow string data.
.. warning::
ArrowStringDtype is considered experimental. The implementation and
parts of the API may change without warning.
In particular, ArrowStringDtype.na_value may change to no longer be
``numpy.nan``.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> import mars.dataframe as md
>>> md.ArrowStringDtype()
ArrowStringDtype
"""
type = str
kind = "U"
name = "Arrow[string]"
na_value = pa_null
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
@classmethod
def construct_array_type(cls) -> "Type[ArrowStringArray]":
return ArrowStringArray
@property
def arrow_type(self):
return pa.string()
@register_extension_dtype
class ArrowStringDtypeAlias(ArrowStringDtype):
name = "arrow_string" # register an alias name for compatibility
class ArrowListDtypeType(type):
"""
The type of ArrowListDtype; this metaclass determines subclass ability.
"""
pass
class ArrowListDtype(ArrowDtype):
_metadata = ("_value_type",)
def __init__(self, dtype):
if isinstance(dtype, type(self)):
dtype = dtype.value_type
if pa and isinstance(dtype, pa.DataType):
dtype = dtype.to_pandas_dtype()
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype) and not isinstance(dtype, ArrowStringDtype):
# convert string dtype to arrow string dtype
dtype = ArrowStringDtype()
self._value_type = dtype
@property
def value_type(self):
return self._value_type
@property
def kind(self):
return "O"
@property
def type(self):
return ArrowListDtypeType
@property
def name(self):
return f"Arrow[List[{self.value_type.name}]]"
@property
def arrow_type(self):
if isinstance(self._value_type, ArrowDtype):
arrow_subdtype = self._value_type.arrow_type
else:
arrow_subdtype = pa.from_numpy_dtype(self._value_type)
return pa.list_(arrow_subdtype)
def __repr__(self) -> str:
return self.name
@classmethod
def construct_array_type(cls) -> "Type[ArrowListArray]":
return ArrowListArray
@classmethod
def construct_from_string(cls, string):
msg = f"Cannot construct a 'ArrowListDtype' from '{string}'"
xpr = re.compile(r"Arrow\[List\[(?P<value_type>[^,]*)\]\]$")
m = xpr.match(string)
if m:
value_type = m.groupdict()["value_type"]
return ArrowListDtype(value_type)
else:
raise TypeError(msg)
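# --- illustrative sketch (editor addition, hedged): the regex above extracts
# the inner value type from names such as "Arrow[List[int64]]", e.g.:
#   m = re.match(r"Arrow\[List\[(?P<value_type>[^,]*)\]\]$", "Arrow[List[int64]]")
#   m.groupdict()["value_type"]  # -> "int64"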
@classmethod
def is_dtype(cls, dtype) -> bool:
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, str):
try:
cls.construct_from_string(dtype)
except TypeError:
return False
else:
return True
else:
return isinstance(dtype, cls)
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
if not isinstance(other, ArrowListDtype):
return False
value_type = self._value_type
other_value_type = other._value_type
try:
return value_type == other_value_type
except TypeError:
# cannot compare numpy dtype and extension dtype
return other_value_type == value_type
class ArrowArray(ExtensionArray):
_arrow_type = None
def __init__(self, values, dtype: ArrowDtype = None, copy=False):
pandas_only = self._pandas_only()
if pa is not None and not pandas_only:
self._init_by_arrow(values, dtype=dtype, copy=copy)
elif not is_kernel_mode():
# not in kernel mode, allow numpy to handle the data
# just for dtype inference purposes
self._init_by_numpy(values, dtype=dtype, copy=copy)
else:
raise ImportError(
"Cannot create ArrowArray " "when `pyarrow` not installed"
)
# for test purpose
self._force_use_pandas = pandas_only
def _init_by_arrow(self, values, dtype: ArrowDtype = None, copy=False):
if isinstance(values, (pd.Index, pd.Series)):
# for pandas Index and Series,
# convert to PandasArray
values = values.array
if isinstance(values, type(self)):
arrow_array = values._arrow_array
elif isinstance(values, ExtensionArray):
# if come from pandas object like index,
# convert to pandas StringArray first,
# validation will be done in construct
arrow_array = pa.chunked_array([pa.array(values, from_pandas=True)])
elif isinstance(values, pa.ChunkedArray):
arrow_array = values
elif isinstance(values, pa.Array):
arrow_array = pa.chunked_array([values])
else:
arrow_array = pa.chunked_array([pa.array(values, type=dtype.arrow_type)])
if copy:
arrow_array = copy_obj(arrow_array)
self._use_arrow = True
self._arrow_array = arrow_array
if NDArrayBacked is not None and isinstance(self, NDArrayBacked):
NDArrayBacked.__init__(self, np.array([]), dtype)
else:
self._dtype = dtype
def _init_by_numpy(self, values, dtype: ArrowDtype = None, copy=False):
self._use_arrow = False
ndarray = np.array(values, copy=copy)
if NDArrayBacked is not None and isinstance(self, NDArrayBacked):
NDArrayBacked.__init__(self, ndarray, dtype)
else:
self._dtype = dtype
self._ndarray = np.array(values, copy=copy)
@classmethod
def _pandas_only(cls):
return options.dataframe.arrow_array.pandas_only
def __repr__(self):
return f"{type(self).__name__}({repr(self._array)})"
@property
def _array(self):
return self._arrow_array if self._use_arrow else self._ndarray
@property
def dtype(self) -> "Type[ArrowDtype]":
return self._dtype
@property
def nbytes(self) -> int:
if self._use_arrow:
return sum(
x.size
for chunk in self._arrow_array.chunks
for x in chunk.buffers()
if x is not None
)
else:
return self._ndarray.nbytes
@property
def shape(self):
if self._use_arrow:
return (self._arrow_array.length(),)
else:
return self._ndarray.shape
def memory_usage(self, deep=True) -> int:
if self._use_arrow:
return self.nbytes
else:
return pd.Series(self._ndarray).memory_usage(index=False, deep=deep)
@classmethod
def _to_arrow_array(cls, scalars):
return pa.array(scalars)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
if pa is None or cls._pandas_only():
# pyarrow not installed, just return numpy
ret = np.empty(len(scalars), dtype=object)
ret[:] = scalars
return cls(ret)
if pa_null is not None and isinstance(scalars, type(pa_null)):
scalars = []
elif not hasattr(scalars, "dtype"):
ret = np.empty(len(scalars), dtype=object)
for i, s in enumerate(scalars):
ret[i] = s
scalars = ret
elif isinstance(scalars, cls):
if copy:
scalars = scalars.copy()
return scalars
arrow_array = pa.chunked_array([cls._to_arrow_array(scalars)])
return cls(arrow_array, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@staticmethod
def _can_process_slice_via_arrow(slc):
if not isinstance(slc, slice):
return False
if slc.step is not None and slc.step != 1:
return False
if slc.start is not None and not isinstance(
slc.start, Integral
): # pragma: no cover
return False
if slc.stop is not None and not isinstance(
slc.stop, Integral
): # pragma: no cover
return False
return True
def _values_for_factorize(self):
arr = self.to_numpy()
mask = self.isna()
arr[mask] = -1
return arr, -1
def _values_for_argsort(self):
return self.to_numpy()
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
@staticmethod
def _process_pos(pos, length, is_start):
if pos is None:
return 0 if is_start else length
return pos + length if pos < 0 else pos
@classmethod
def _post_scalar_getitem(cls, lst):
return lst.to_pandas()[0]
def __getitem__(self, item):
cls = type(self)
if pa is None or self._force_use_pandas:
# pyarrow not installed
result = self._ndarray[item]
if pd.api.types.is_scalar(item):
return result
else:
return type(self)(result)
has_take = hasattr(self._arrow_array, "take")
if not self._force_use_pandas and has_take:
if pd.api.types.is_scalar(item):
item = item + len(self) if item < 0 else item
return self._post_scalar_getitem(self._arrow_array.take([item]))
elif self._can_process_slice_via_arrow(item):
length = len(self)
start, stop = item.start, item.stop
start = self._process_pos(start, length, True)
stop = self._process_pos(stop, length, False)
return cls(
self._arrow_array.slice(offset=start, length=stop - start),
dtype=self._dtype,
)
elif hasattr(item, "dtype") and np.issubdtype(item.dtype, np.bool_):
return cls(
self._arrow_array.filter(pa.array(item, from_pandas=True)),
dtype=self._dtype,
)
elif hasattr(item, "dtype"):
length = len(self)
item = np.where(item < 0, item + length, item)
return cls(self._arrow_array.take(item), dtype=self._dtype)
array = np.asarray(self._arrow_array.to_pandas())
return cls(array[item], dtype=self._dtype)
@classmethod
def _concat_same_type(cls, to_concat: Sequence["ArrowArray"]) -> "ArrowArray":
if pa is None or cls._pandas_only():
# pyarrow not installed
return cls(np.concatenate([x._array for x in to_concat]))
chunks = list(
itertools.chain.from_iterable(x._arrow_array.chunks for x in to_concat)
)
if len(chunks) == 0:
chunks = [pa.array([], type=to_concat[0].dtype.arrow_type)]
return cls(pa.chunked_array(chunks))
def __len__(self):
return len(self._array)
def __array__(self, dtype=None):
return self.to_numpy(dtype=dtype)
def to_numpy(self, dtype=None, copy=False, na_value=lib.no_default):
if self._use_arrow:
array = np.asarray(self._arrow_array.to_pandas())
else:
array = self._ndarray
if copy or na_value is not lib.no_default:
array = array.copy()
if na_value is not lib.no_default:
array[self.isna()] = na_value
return array
@classmethod
def _array_fillna(cls, array, value):
return array.fillna(value)
def fillna(self, value=None, method=None, limit=None):
cls = type(self)
if pa is None or self._force_use_pandas:
# pyarrow not installed
return cls(
pd.Series(self.to_numpy()).fillna(
value=value, method=method, limit=limit
)
)
chunks = []
for chunk_array in self._arrow_array.chunks:
array = chunk_array.to_pandas()
if method is None:
result_array = self._array_fillna(array, value)
else:
result_array = array.fillna(value=value, method=method, limit=limit)
chunks.append(pa.array(result_array, from_pandas=True))
return cls(pa.chunked_array(chunks), dtype=self._dtype)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if isinstance(dtype, ArrowStringDtype):
if copy:
return self.copy()
return self
if pa is None or self._force_use_pandas:
# pyarrow not installed
if isinstance(dtype, ArrowDtype):
dtype = dtype.type
return type(self)(pd.Series(self.to_numpy()).astype(dtype, copy=copy))
# try to slice 1 record to get the result dtype
test_array = self._arrow_array.slice(0, 1).to_pandas()
test_result_array = test_array.astype(dtype).array
result_array = type(test_result_array)(
np.full(
self.shape,
test_result_array.dtype.na_value,
dtype=np.asarray(test_result_array).dtype,
)
)
start = 0
# use chunks to do astype
for chunk_array in self._arrow_array.chunks:
result_array[start : start + len(chunk_array)] = (
chunk_array.to_pandas().astype(dtype).array
)
start += len(chunk_array)
return result_array
def isna(self):
if (
not self._force_use_pandas
and self._use_arrow
and hasattr(self._arrow_array, "is_null")
):
return self._arrow_array.is_null().to_pandas().to_numpy()
elif self._use_arrow:
return pd.isna(self._arrow_array.to_pandas()).to_numpy()
else:
return pd.isna(self._ndarray)
def take(self, indices, allow_fill=False, fill_value=None):
if (
allow_fill is False or (allow_fill and fill_value is self.dtype.na_value)
) and len(self) > 0:
return type(self)(self[indices], dtype=self._dtype)
if self._use_arrow:
array = self._arrow_array.to_pandas().to_numpy()
else:
array = self._ndarray
replace = False
if allow_fill and (fill_value is None or fill_value == self._dtype.na_value):
fill_value = self.dtype.na_value
replace = True
result = take(array, indices, fill_value=fill_value, allow_fill=allow_fill)
del array
if replace and pa is not None:
# pyarrow cannot recognize pa.NULL
result[result == self.dtype.na_value] = None
return type(self)(result, dtype=self._dtype)
def copy(self):
if self._use_arrow:
return type(self)(copy_obj(self._arrow_array))
else:
return type(self)(self._ndarray.copy())
def value_counts(self, dropna=False):
if self._use_arrow:
series = self._arrow_array.to_pandas()
else:
series = pd.Series(self._ndarray)
return type(self)(series.value_counts(dropna=dropna), dtype=self._dtype)
if _use_bool_any_all:
def any(self, axis=0, out=None):
return self.to_numpy().astype(bool).any(axis=axis, out=out)
def all(self, axis=0, out=None):
return self.to_numpy().astype(bool).all(axis=axis, out=out)
else:
def any(self, axis=0, out=None):
return self.to_numpy().any(axis=axis, out=out)
def all(self, axis=0, out=None):
return self.to_numpy().all(axis=axis, out=out)
def __mars_tokenize__(self):
if self._use_arrow:
return tokenize(
[
memoryview(x)
for chunk in self._arrow_array.chunks
for x in chunk.buffers()
if x is not None
]
)
else:
return self._ndarray
class ArrowStringArray(ArrowArray, StringArrayBase):
def __init__(self, values, dtype=None, copy=False):
if dtype is not None:
assert isinstance(dtype, ArrowStringDtype)
ArrowArray.__init__(self, values, ArrowStringDtype(), copy=copy)
@classmethod
def from_scalars(cls, values):
if pa is None or cls._pandas_only():
return cls._from_sequence(values)
else:
arrow_array = pa.chunked_array([cls._to_arrow_array(values)])
return cls(arrow_array)
@classmethod
def _to_arrow_array(cls, scalars):
return pa.array(scalars).cast(pa.string())
def __setitem__(self, key, value):
if isinstance(value, (pd.Index, pd.Series)):
value = value.to_numpy()
if isinstance(value, type(self)):
value = value.to_numpy()
key = check_array_indexer(self, key)
scalar_key = is_scalar(key)
scalar_value = is_scalar(value)
if scalar_key and not scalar_value:
raise ValueError("setting an array element with a sequence.")
# validate new items
if scalar_value:
if pd.isna(value):
value = None
elif not isinstance(value, str):
raise ValueError(
f"Cannot set non-string value '{value}' into a ArrowStringArray."
)
else:
if not is_array_like(value):
value = np.asarray(value, dtype=object)
if len(value) and not lib.is_string_array(value, skipna=True):
raise ValueError("Must provide strings.")
if self._use_arrow:
string_array = np.asarray(self._arrow_array.to_pandas())
string_array[key] = value
self._arrow_array = pa.chunked_array([pa.array(string_array)])
else:
self._ndarray[key] = value
# Override parent because we have different return types.
@classmethod
def _create_arithmetic_method(cls, op):
# Note: this handles both arithmetic and comparison methods.
def method(self, other):
            is_arithmetic = op.__name__ in ops.ARITHMETIC_BINOPS
pandas_only = cls._pandas_only()
is_other_array = False
if not is_scalar(other):
is_other_array = True
other = np.asarray(other)
self_is_na = self.isna()
other_is_na = pd.isna(other)
mask = self_is_na | other_is_na
if pa is None or pandas_only:
if is_arithmetic:
ret = np.empty(self.shape, dtype=object)
else:
ret = np.zeros(self.shape, dtype=bool)
valid = ~mask
arr = (
self._arrow_array.to_pandas().to_numpy()
if self._use_arrow
else self._ndarray
)
o = other[valid] if is_other_array else other
ret[valid] = op(arr[valid], o)
if is_arithmetic:
return ArrowStringArray(ret)
else:
return pd.arrays.BooleanArray(ret, mask)
chunks = []
mask_chunks = []
start = 0
for chunk_array in self._arrow_array.chunks:
chunk_array = np.asarray(chunk_array.to_pandas())
end = start + len(chunk_array)
chunk_mask = mask[start:end]
chunk_valid = ~chunk_mask
if is_arithmetic:
result = np.empty(chunk_array.shape, dtype=object)
else:
result = np.zeros(chunk_array.shape, dtype=bool)
chunk_other = other
if is_other_array:
chunk_other = other[start:end]
chunk_other = chunk_other[chunk_valid]
# calculate only for both not None
result[chunk_valid] = op(chunk_array[chunk_valid], chunk_other)
if is_arithmetic:
chunks.append(pa.array(result, type=pa.string(), from_pandas=True))
else:
chunks.append(result)
mask_chunks.append(chunk_mask)
if is_arithmetic:
return ArrowStringArray(pa.chunked_array(chunks))
else:
return pd.arrays.BooleanArray(
np.concatenate(chunks), np.concatenate(mask_chunks)
)
return set_function_name(method, f"__{op.__name__}__", cls)
def shift(self, periods: int = 1, fill_value: object = None) -> "ArrowStringArray":
return ExtensionArray.shift(self, periods=periods, fill_value=fill_value)
@classmethod
def _add_arithmetic_ops(cls):
cls.__add__ = cls._create_arithmetic_method(operator.add)
cls.__radd__ = cls._create_arithmetic_method(ops.radd)
cls.__mul__ = cls._create_arithmetic_method(operator.mul)
cls.__rmul__ = cls._create_arithmetic_method(ops.rmul)
@classmethod
def _add_comparison_ops(cls):
cls.__eq__ = cls._create_comparison_method(operator.eq)
cls.__ne__ = cls._create_comparison_method(operator.ne)
cls.__lt__ = cls._create_comparison_method(operator.lt)
cls.__gt__ = cls._create_comparison_method(operator.gt)
cls.__le__ = cls._create_comparison_method(operator.le)
cls.__ge__ = cls._create_comparison_method(operator.ge)
_create_comparison_method = _create_arithmetic_method
ArrowStringArray._add_arithmetic_ops()
ArrowStringArray._add_comparison_ops()
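# Hedged usage sketch (not part of the original module; assumes pyarrow is
# installed): comparison ops return a pandas BooleanArray whose mask marks
# positions that were missing in either operand.
def _example_string_compare():
    arr = ArrowStringArray(["a", "b", None])
    return arr == "a"  # BooleanArray: [True, False, <NA>]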
class ArrowListArray(ArrowArray):
def __init__(self, values, dtype: ArrowListDtype = None, copy=False):
if dtype is None:
if isinstance(values, type(self)):
dtype = values.dtype
elif pa is not None:
if isinstance(values, pa.Array):
dtype = ArrowListDtype(values.type.value_type)
elif isinstance(values, pa.ChunkedArray):
dtype = ArrowListDtype(values.type.value_type)
else:
values = pa.array(values)
if values.type == pa.null():
dtype = ArrowListDtype(pa.string())
else:
dtype = ArrowListDtype(values.type.value_type)
else:
value_type = np.asarray(values[0]).dtype
dtype = ArrowListDtype(value_type)
super().__init__(values, dtype=dtype, copy=copy)
def to_numpy(self, dtype=None, copy=False, na_value=lib.no_default):
if self._use_arrow:
s = self._arrow_array.to_pandas()
else:
s = pd.Series(self._ndarray)
s = s.map(lambda x: x.tolist() if hasattr(x, "tolist") else x)
if copy or na_value is not lib.no_default:
s = s.copy()
if na_value is not lib.no_default:
s[self.isna()] = na_value
return np.asarray(s)
@classmethod
def _post_scalar_getitem(cls, lst):
return lst[0].as_py()
def __setitem__(self, key, value):
if isinstance(value, (pd.Index, pd.Series)):
value = value.to_numpy()
key = check_array_indexer(self, key)
scalar_key = is_scalar(key)
# validate new items
if scalar_key:
if pd.isna(value):
value = None
elif not is_list_like(value):
raise ValueError("Must provide list.")
if self._use_arrow:
array = np.asarray(self._arrow_array.to_pandas())
array[key] = value
self._arrow_array = pa.chunked_array(
[pa.array(array, type=self.dtype.arrow_type)]
)
else:
self._ndarray[key] = value
@classmethod
def _array_fillna(cls, series, value):
# cannot fillna directly, because value is a list-like object
        return series.apply(lambda x: x if is_list_like(x) else value)
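# Hedged usage sketch (not part of the original module; assumes pyarrow is
# installed): missing rows of a list array are replaced element-wise, since a
# list value cannot be passed to pandas' fillna directly.
def _example_arrow_list_array():
    arr = ArrowListArray([[1, 2], None, [3]])
    return arr.fillna([])  # the missing row becomes an empty list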
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup as bs
import pandas as pd
from time import sleep
capa = DesiredCapabilities.CHROME
capa["pageLoadStrategy"] = "none"
opts = Options()
opts.add_argument('--ignore-certificate-errors')
opts.add_argument('--ignore-ssl-errors')
# opts.add_argument("--headless")
driver = webdriver.Chrome(
desired_capabilities=capa, options=opts, executable_path='C:/WebDrivers/chromedriver.exe')
try:
print()
print('Scraping Process is Started')
url = 'https://www.nasdaq.com/market-activity/stocks/tsla/short-interest'
wait = WebDriverWait(driver, 10)
print()
print('Loading Url Please Wait...')
driver.get(url)
driver.maximize_window()
wait.until(EC.presence_of_element_located(
(By.CSS_SELECTOR, '.short-interest__table-body')))
sleep(3)
driver.execute_script("window.scrollBy(0, 900)", "")
# driver.execute_script("window.stop();")
print()
print('Waiting for Content to Load Properly')
sleep(3)
src = driver.page_source
soup = bs(src, 'lxml')
list_1 = []
list_2 = []
list_3 = []
list_4 = []
print()
print('Content is Loaded, Scraping Required Detail')
print('Wait Please...')
SETTLEMENT_DATE = soup.find_all('th', class_='short-interest__cell')
for date in SETTLEMENT_DATE:
text0 = date.text
list_1.append(text0)
SHORT_INTEREST = soup.find_all(
'td', class_='short-interest__cell--interest')
for interest in SHORT_INTEREST:
text1 = interest.text
list_2.append(text1)
AVG_DAILY_SHARE_VOLUME = soup.find_all(
'td', class_='short-interest__cell--avgDailyShareVolume')
for avg in AVG_DAILY_SHARE_VOLUME:
text2 = avg.text
list_3.append(text2)
DAYS_TO_COVER = soup.find_all(
'td', class_='short-interest__cell--daysToCover')
for days in DAYS_TO_COVER:
text3 = days.text
list_4.append(text3)
print()
print('Scraping Done, Saving the Data')
data = {
'SETTLEMENT_DATE': list_1,
'SHORT_INTEREST': list_2,
'AVG_DAILY_SHARE_VOLUME': list_3,
'DAYS_TO_COVER': list_4
}
    df = pd.DataFrame(data)
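    # The original snippet is truncated at the DataFrame construction above; the
    # lines below are a hedged sketch of how it could conclude (the output file
    # name and the error handling are assumptions, not from the original source).
    print()
    print('Saving Data to CSV')
    df.to_csv('nasdaq_short_interest.csv', index=False)
except Exception as e:
    print(f'Scraping failed: {e}')
finally:
    driver.quit()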
import backend.constants as constants
from datetime import datetime
import pandas as pd
df = pd.read_excel("C://sl_data//xlsx//okpm_results.xlsx", sheet_name="A")
archive.load_machine_info(constants.machine_info_path)
machine_legend = pd.read_excel("C://sl_data//xlsx//okpm_results.xlsx", sheet_name="machine_legend", header=0, index_col=0)
conditionals = [3, 1 ,1 ,1, 2, 3, 2, 1, 2, 3, 2]
def create_schedule(gunler, shift, overtime_hour):
temp_df = pd.DataFrame(columns=[0,1])
for x in range(len(gunler)):
if conditionals[x] == 1:
temp_df = pd.concat([temp_df, df_types[1][shift]], ignore_index = True)
elif conditionals[x] == 2:
temp_df = pd.concat([temp_df, df_types[2](overtime_hour)], ignore_index = True)
else:
temp_df = pd.concat([temp_df, df_types[3]], ignore_index = True)
return temp_df
def ctesi_creator(hour):
    # overtime-day schedule: work `hour` hours, rest for the remainder of the day
    if hour < 24:
        out = pd.DataFrame(data=[[1, hour], [0, 24 - hour]])
    else:
        out = pd.DataFrame(data=[[1, 24], [0, 0]])
    return out
# ctesi_creator must be defined before df_types references it
df_types = {1: {1: pd.DataFrame(data=[[1, 0, 0, 0, 0, 0], [7.5, 0.5]*3]).transpose(), 2: pd.DataFrame(data=[[1, 0, 1, 0, 0, 0], [7.5, 0.5]*3]).transpose(), 3: pd.DataFrame(data=[[1, 0, 1, 0, 1, 0], [7.5, 0.5]*3]).transpose()}, 2: ctesi_creator, 3: pd.DataFrame(data=[0, 24]).transpose()}
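# Hedged usage sketch (not part of the original source): build a one-week
# schedule with shift pattern 2 and six hours of overtime on the overtime day;
# `gunler` only needs the right length here, its values are not used.
def _example_weekly_schedule():
    gunler = list(range(7))
    return create_schedule(gunler, shift=2, overtime_hour=6)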
import json
import math
import random
import numpy as np
import pandas as pd
from scipy.stats import norm
from sklearn import linear_model
from math import sqrt
from DataSynthesizer.lib.utils import read_json_file
from FAIR.FairnessInRankings import FairnessInRankingsTester
def save_uploaded_file(file, current_file):
"""
Save user uploaded data on server.
Attributes:
file: the uploaded dataset.
current_file: file name with out ".csv" suffix
"""
with open(current_file+".csv", 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
def get_score_scatter(current_file,top_K=100):
"""
Generated data for scatter plot.
Attributes:
current_file: file name that stored the data (with out ".csv" suffix)
top_K: threshold of data size that scatter plot included
Return: data for scatter plot using HighChart format
"""
data = pd.read_csv(current_file+"_weightsum.csv").head(top_K)
scatter_points = []
score_value = data["GeneratedScore"].tolist()
position_value = [x for x in range(1, len(data) + 1)]
for i in range(len(score_value)):
scatter_points.append([position_value[i], score_value[i]])
return scatter_points
def getAttValueCountTopAndOverall(input_data, att_name, top_K=10):
"""
Subfunction to count values of input attribute in the data for top 10 and overall pie chart.
Attributes:
input_data: dataframe that store the input data
att_name: name of attribuet to count
top_K: top k position to count the value, default value is 10
Return: json data includes two two-dimension arrays for value and its count at top 10 and overall
"""
counts_all = {}
all_values_count = input_data[att_name].value_counts()
top_data = input_data[0:top_K]
# get overall counts
new_values_all = []
for i in range(len(all_values_count)):
cur_cate = all_values_count.index[i]
# if not a string, then encode it to the type that is JSON serializable
if not isinstance(cur_cate, str):
cur_cate = str(cur_cate)
cur_count = int(all_values_count.values[i])
new_values_all.append([cur_cate,cur_count])
counts_all["overall"] = new_values_all
# get top K counts and make sure list of counts include every value of input attribute for consistent pie chart colors
top_values_count = top_data[att_name].value_counts()
top_cates = top_values_count.index
# generate a dict to store the top k value counts
top_values_count_dic = {}
for i in range(len(top_values_count)):
top_values_count_dic[top_values_count.index[i]] = int(top_values_count.values[i])
# generate a new value list for top K using same order as in over all list
new_values_top = []
for i in range(len(all_values_count)):
cur_cate = all_values_count.index[i]
# if not a string, then encode it to the type that is JSON serializable
if not isinstance(cur_cate, str):
str_cur_cate = str(cur_cate)
else:
str_cur_cate = cur_cate
if cur_cate in top_cates: # exiting in top K
new_values_top.append([str_cur_cate, top_values_count_dic[cur_cate]])
else:
new_values_top.append([str_cur_cate, 0])
counts_all["topTen"] = new_values_top
return counts_all
def get_chart_data(current_file, att_names):
"""
Generated data for pie chart.
Attributes:
current_file: file name that stored the data (with out ".csv" suffix)
att_names: list of attribute names to compute the chart data
Return: json data for pie chart plot using HighChart format
"""
data = pd.read_csv(current_file + "_weightsum.csv")
pie_data = {}
for ai in att_names:
cur_ai_json = {}
counts_all = getAttValueCountTopAndOverall(data,ai)
cur_ai_json["topTen"] = counts_all["topTen"]
cur_ai_json["overall"] = counts_all["overall"]
pie_data[ai] = cur_ai_json
return pie_data
def computeSlopeOfScores(current_file,top_K, round_default=2):
"""
    Compute the slope of the scatter plot.
Attributes:
current_file: file name that stored the data (with out ".csv" suffix)
top_K: threshold of data size to compute the slope
Return: slope of scatter plot of top_K data
"""
data = pd.read_csv(current_file + "_weightsum.csv")
top_data = data[0:top_K]
xd = [i for i in range(1,top_K+1)]
yd = top_data["GeneratedScore"].tolist()
# determine best fit line
par = np.polyfit(xd, yd, 1, full=True)
slope = par[0][0]
return round(slope,round_default)
def compute_correlation(current_file,y_col="GeneratedScore",top_threshold=3,round_default=2):
"""
Compute the correlation between attributes and generated scores.
Attributes:
current_file: file name that stored the data (with out ".csv" suffix)
y_col: column name of Y variable
top_threshold: threshold of number of returned correlated attribute
round_default: threshold of round function for the returned coefficient
Return: list of correlated attributes and its coefficients
"""
# get the data for generated ranking
ranking_df = pd.read_csv(current_file+"_weightsum.csv")
# get the upload data for correlation computation
upload_df = pd.read_csv(current_file+".csv")
numeric_atts = list(upload_df.describe().columns)
X = upload_df[numeric_atts].values
# no need to standardize data
# scaler = StandardScaler()
# transform_X = scaler.fit_transform(X)
y = ranking_df[y_col].values
regr = linear_model.LinearRegression(normalize=False)
regr.fit(X, y)
# get coeff's, ordered by significance
# format weight with decile of 3
for i in range(len(regr.coef_)):
regr.coef_[i] = round(regr.coef_[i], round_default)
# normalize coefficients to [-1,1]
max_coef = max(regr.coef_)
min_coef = min(regr.coef_)
abs_max = max(abs(max_coef),abs(min_coef))
stand_coef = []
for ci in regr.coef_:
new_ci = round(ci/abs_max,round_default)
stand_coef.append(new_ci)
# coeff_zip = zip(regr.coef_, numeric_atts)
coeff_zip = zip(stand_coef, numeric_atts)
coeff_sorted = sorted(coeff_zip, key=lambda tup: abs(tup[0]), reverse=True)
if len(coeff_sorted) > top_threshold:
coeff_return = coeff_sorted[0:top_threshold]
else:
coeff_return = coeff_sorted
# only return top_threshold most correlated attributes
return coeff_return
def compute_statistic_topN(chosed_atts,current_file,top_N,round_default=1):
"""
Compute the statistics of input attributes.
Attributes:
chosed_atts: list of attributes to be computed
current_file: file name that stored the data (with out ".csv" suffix)
top_N: size of data to be used in current_file
round_default: threshold of round function for the returned statistics
Return: json data of computed statistics
"""
# data is sorted by ranking scores from higher to lower
data = pd.read_csv(current_file+"_weightsum.csv").head(top_N)
statistic_data = {}
# get the median data
for atti in chosed_atts:
cur_att_max = max(data[atti])
cur_att_median = np.median(data[atti])
cur_att_min = min(data[atti])
statistic_data[atti] = {"max": round(cur_att_max, round_default),
"median": round(cur_att_median, round_default),
"min": round(cur_att_min, round_default)}
return statistic_data
def mergeUnfairRanking(_px, _sensitive_idx, _fprob): # input is the ranking
"""
Generate a fair ranking.
Attributes:
_px: input ranking (sorted), list of ids
_sensitive_idx: the index of protected group in the input ranking
_fprob: probability to choose the protected group
Return: generated fair ranking, list of ids
"""
# _px=sorted(range(len(_inputrankingscore)), key=lambda k: _inputrankingscore[k],reverse=True)
rx = [x for x in _px if x not in _sensitive_idx]
qx = [x for x in _px if x in _sensitive_idx]
rx.reverse() # prepare for pop function to get the first element
qx.reverse()
res_list = []
while (len(qx) > 0 and len(rx) > 0):
r_cur = random.random()
# r_cur=random.uniform(0,1.1)
if r_cur < _fprob:
res_list.append(qx.pop()) # insert protected group first
else:
res_list.append(rx.pop())
if len(qx) > 0:
qx.reverse()
res_list = res_list + qx
if len(rx) > 0:
rx.reverse()
res_list = res_list + rx
if len(res_list) < len(_px):
print("Error!")
return res_list
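# Hedged usage sketch (not part of the original module): re-rank ten candidates
# whose ids 0-2 form the protected group, choosing the protected group with
# probability 0.3 at each position.
def _example_merge_unfair_ranking():
    ranking = list(range(10))
    protected_ids = [0, 1, 2]
    return mergeUnfairRanking(ranking, protected_ids, 0.3)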
def runFairOracles(chosed_atts,current_file,alpha_default=0.05,k_threshold=200,k_percentage=0.5):
"""
Run all fairness oracles: FA*IR, Pairwise and Proportion
Attributes:
chosed_atts: list of sensitive attributes
current_file: file name that stored the data (with out ".csv" suffix)
alpha_default: default value of significance level in each oracle
k_threshold: threshold of size of upload data to decide the top-K in FA*IR and Proportion
k_percentage: threshold to help to decide the top-K in FA*IR and Proportion when upload dataset's size less than k_threshold
Return: json data of fairness results of all oracles
"""
# data is sorted by ranking scores from higher to lower
data = pd.read_csv(current_file+"_weightsum.csv")
total_n = len(data)
# set top K based on the size of input data
# if N > 200, then set top K = 100, else set top K = 0.5*N
if total_n > k_threshold:
top_K = 100
else:
top_K = int(np.ceil(k_percentage* total_n))
fair_res_data = {} # include all details of fairness validation
fair_statement_data = {} # only include the fairness result, i.e. fair or unfair, True represents fair, False represents unfair.
for si in chosed_atts:
# get the unique value of this sensitive attribute
values_si_att = list(data[si].unique())
# for each value, compute the current pairs and estimated fair pairs
si_value_json = {}
si_fair_json = {}
for vi in values_si_att:
# run FAIR oracle to compute its p-value and alpha_c
p_value_fair,alphac_fair = computePvalueFAIR(si,vi,current_file,top_K)
res_fair= p_value_fair > alphac_fair
# run Pairwise orace to compute its p-value, alpha use the default value
p_value_pairwise = computePvaluePairwise(si,vi,current_file)
res_pairwise = p_value_pairwise > alpha_default
# run Proportion oracle to compute its p-value, alpha use the default value
p_value_proportion = computePvalueProportion(si,vi,current_file,top_K)
res_proportion = p_value_proportion > alpha_default
if not isinstance(vi, str):
filled_vi = vi
else:
filled_vi = vi.replace(" ", "")
si_value_json[filled_vi] = [p_value_fair,alphac_fair,p_value_pairwise,alpha_default,p_value_proportion,alpha_default]
si_fair_json[filled_vi] = [res_fair,res_pairwise,res_proportion]
if not isinstance(si, str):
filled_si = si
else:
filled_si = si.replace(" ", "")
fair_res_data[filled_si] = si_value_json
fair_statement_data[filled_si] = si_fair_json
return fair_res_data, fair_statement_data, alpha_default, top_K
def computePvalueFAIR(att_name,att_value,current_file,top_K,round_default=2):
"""
Compute p-value using FA*IR oracle
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
current_file: file name that stored the data (with out ".csv" suffix)
top_K: top_K value in FA*IR
round_default: threshold of round function for the returned p-value
Return: rounded p-value and adjusted significance level in FA*IR
"""
# input checked_atts includes names of checked sensitive attributes
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
top_data = data[0:top_K]
# for attribute value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name]==att_value].index+1
size_vi = len(position_lists_val)
fair_p_vi = size_vi/total_N
# generate a ranking of tuples with (id,"pro")/(id,"unpro") by current value as protected group
generated_ranking = []
for index, row in top_data.iterrows():
if row[att_name] == att_value:
generated_ranking.append([index,"pro"])
else:
generated_ranking.append([index,"unpro"])
p_value, isFair, posiFail, alpha_c, pro_needed_list = computeFairRankingProbability(top_K,fair_p_vi,generated_ranking)
return round(p_value,round_default),round(alpha_c,round_default)
def computePvaluePairwise(att_name,att_value,current_file, round_default=2):
"""
Compute p-value using Pairwise oracle
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
current_file: file name that stored the data (with out ".csv" suffix)
run_time: running times of simulation using mergeUnfairRanking
round_default: threshold of round function for the returned p-value
Return: rounded p-value
"""
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
# for attribute value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name] == att_value].index + 1
size_vi = len(position_lists_val)
fair_p_vi = size_vi / total_N
# get the pre-computed pairwise results from simulation
# simu_data = read_json_file("/home/ec2-user/dataResponsiblyUI/playdata/SimulationPairs_N1000_R1000.json")
simu_data = read_json_file("/home/ec2-user/dataResponsiblyUI/playdata/SimulationPairs_N"+str(total_N)+"_R1000.json")
all_fair_p = list(simu_data.keys())
if str(fair_p_vi) in all_fair_p:
cur_pi = str(fair_p_vi)
else:
diff_p = []
for pi in all_fair_p:
num_pi = float(pi)
diff_p.append(abs(num_pi - fair_p_vi))
min_diff_index = diff_p.index(min(diff_p))
cur_pi = all_fair_p[min_diff_index]
# compute the number of pairs of value > * in the input ranking that is stored in the current file
pair_N_vi, estimated_fair_pair_vi, size_vi = computePairN(att_name,att_value,current_file)
# compute the cdf, i.e. p-value of input pair value
sample_pairs = simu_data[cur_pi]
cdf_pair = Cdf(sample_pairs,pair_N_vi)
# decide to use left tail or right tail
# mode_pair_sim,_ = mode(sample_pairs)
# median_mode = np.median(list(mode_pair_sim))
# if pair_N_vi <= mode_pair_sim:
# p_value = cdf_pair
# else:
# p_value = 1- cdf_pair
return round(cdf_pair,round_default)
def computePvaluePairwise_simu(att_name,att_value,current_file, run_time=100, round_default=2):
"""
Compute p-value using Pairwise oracle
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
current_file: file name that stored the data (with out ".csv" suffix)
run_time: running times of simulation using mergeUnfairRanking
round_default: threshold of round function for the returned p-value
Return: rounded p-value
"""
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
# for attribute value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name] == att_value].index + 1
size_vi = len(position_lists_val)
fair_p_vi = size_vi / total_N
seed_random_ranking = [x for x in range(total_N)] # list of IDs
seed_f_index = [x for x in range(size_vi)] # list of IDs
# for simulation outputs
data_file = "./media/FairRankingGeneration"
plot_df = pd.DataFrame(columns=["RunCount", "N", "sensi_n", "fair_mp", "pair_n"])
# run simulations, in each simulation, generate a fair ranking with input N and size of sensitive group
for ri in range(run_time):
# only for binary sensitive attribute
output_ranking = mergeUnfairRanking(seed_random_ranking, seed_f_index, fair_p_vi)
position_pro_list = [i for i in range(len(output_ranking)) if output_ranking[i] in seed_f_index]
count_sensi_prefered_pairs = 0
for i in range(len(position_pro_list)):
cur_position = position_pro_list[i]
left_sensi = size_vi - (i + 1)
count_sensi_prefered_pairs = count_sensi_prefered_pairs + (total_N - cur_position - left_sensi)
# count_other_prefered_pairs = (_input_sensi_n*(_input_n-_input_sensi_n)) - count_sensi_prefered_pairs
cur_row = [ri + 1, total_N, size_vi, fair_p_vi, count_sensi_prefered_pairs]
plot_df.loc[len(plot_df)] = cur_row
# save the data of pairs in fair ranking generation on server
plot_df.to_csv(data_file + "_R" + str(run_time) + "_N" + str(total_N) + "_S" + str(size_vi) + "_pairs.csv")
# compute the number of pairs of value > * in the input ranking that is stored in the current file
pair_N_vi, estimated_fair_pair_vi, size_vi = computePairN(att_name,att_value,current_file)
# compute the cdf, i.e. p-value of input pair value
sample_pairs = list(plot_df["pair_n"].dropna())
cdf_pair = Cdf(sample_pairs,pair_N_vi)
# decide to use left tail or right tail
# mode_pair_sim,_ = mode(sample_pairs)
# median_mode = np.median(list(mode_pair_sim))
# if pair_N_vi <= mode_pair_sim:
# p_value = cdf_pair
# else:
# p_value = 1- cdf_pair
return round(cdf_pair,round_default)
def computePvalueProportion(att_name,att_value,current_file, top_K, round_default=2):
"""
Compute p-value using Proportion oracle, i.e., z-test method of 4.1.3 in "A survey on measuring indirect discrimination in machine learning".
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
current_file: file name that stored the data (with out ".csv" suffix)
top_K: threshold to decide the positive outcome. Ranked inside top_K is positive outcome. Otherwise is negative outcome.
round_default: threshold of round function for the returned p-value
Return: rounded p-value
"""
# using z-test method of 4.1.3 in "A survey on measuring indirect discrimination in machine learning"
# for binary attribute only
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
top_data = data[0:top_K]
# for attribute value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name] == att_value].index + 1
size_vi = len(position_lists_val)
size_other = total_N - size_vi
size_vi_top = len(top_data[top_data[att_name]==att_value].index +1)
size_other_top = top_K - size_vi_top
p_vi_top = size_vi_top / size_vi
p_other_top = size_other_top / size_other
p_vi_rest = 1 - p_vi_top
p_other_rest = 1- p_other_top
pooledSE = sqrt((p_vi_top * p_vi_rest/ size_vi) + ( p_other_top * p_other_rest / size_other))
z_test = (p_other_top - p_vi_top) / pooledSE
p_value = norm.sf(z_test)
return round(p_value,round_default)
def Cdf(_input_array, x):
"""
Compute the CDF value of input samples using left tail computation
Attributes:
_input_array: list of data points
x: current K value
Return: value of cdf
"""
# left tail
count = 0.0
for vi in _input_array:
if vi <= x:
count += 1.0
prob = count / len(_input_array)
return prob
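# Hedged usage sketch (not part of the original module): left-tail p-value of
# observing 3 protected-preferred pairs against five simulated pair counts.
def _example_cdf():
    simulated_pairs = [2, 4, 5, 7, 9]
    return Cdf(simulated_pairs, 3)  # 1 of 5 samples <= 3, so 0.2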
def computeFairRankingProbability(k,p,generated_ranking,default_alpha=0.05):
"""
Sub-function to compute p-value used in FA*IR oracle
Attributes:
k: top_K value in FA*IR
p: minimum proportion of protected group
generated_ranking: input ranking of users
default_alpha: default significance level of FA*IR
Return: p-value, fairness, rank position fail, adjusted significance level and list of ranking positions that protected group should be using FA*IR
"""
## generated_ranking is a list of tuples (id, "pro"),...(id,"unpro")
gft = FairnessInRankingsTester(p, default_alpha, k, correctedAlpha=True)
posAtFail, isFair = gft.ranked_group_fairness_condition(generated_ranking)
p_value = gft.calculate_p_value_left_tail(k, generated_ranking)
return p_value, isFair, posAtFail, gft.alpha_c, gft.candidates_needed
def computePairN(att_name, att_value,current_file):
"""
Sub-function to compute number of pairs that input value > * used in Pairwise oracle
Attributes:
att_name: sensitive attribute name
att_value: value of protected group of above attribute
current_file: file name that stored the data (with out ".csv" suffix)
Return: number of pairs of att_value > * in input data, number of pairs of att_value > * estimated using proportion, and proportion of group with att_value
"""
# input checked_atts includes names of checked sensitive attributes
data = pd.read_csv(current_file + "_weightsum.csv")
total_N = len(data)
# get the unique value of this sensitive attribute
values_att = list (data[att_name].unique())
# for each value, compute the current pairs and estimated fair pairs
position_lists_val = data[data[att_name]==att_value].index+1
size_vi = len(position_lists_val)
count_vi_prefered_pairs = 0
for i in range(len(position_lists_val)):
cur_position = position_lists_val[i]
left_vi = size_vi - (i + 1)
count_vi_prefered_pairs = count_vi_prefered_pairs + (total_N - cur_position - left_vi)
# compute estimated fair pairs
total_pairs_vi = size_vi*(total_N-size_vi)
estimated_vi_pair = math.ceil((size_vi / total_N) * total_pairs_vi)
return int(count_vi_prefered_pairs),int(estimated_vi_pair),int(size_vi)
def getSizeOfRanking(current_file):
"""
Compute size of generated ranking.
Attributes:
current_file: file name that stored the data (with out ".csv" suffix)
Return: size of ranking
"""
data = pd.read_csv(current_file+"_weightsum.csv")
return len(data)
def getSizeOfDataset(current_file):
"""
Compute number of rows in the input data.
Attributes:
current_file: file name that stored the data (with out ".csv" suffix)
Return: number of rows in current_file
"""
data = pd.read_csv(current_file+".csv")
return len(data)
def generateRanking(current_file,top_K=100):
"""
Generate a ranking of input data.
Attributes:
current_file: file name that stored the data (with out ".csv" suffix)
top_K: threshold of returned generated ranking
Return: json data of a dataframe that stored the generated ranking
"""
ranks_file = current_file + "_rankings.json"
rankings_paras = read_json_file(ranks_file)
data = pd.read_csv(current_file + ".csv")
# before compute the score, replace the NA in the data with 0
filled_data = data.fillna(value=0)
chosed_atts = rankings_paras["ranked_atts"]
filled_data["GeneratedScore"] = 0
for i in range(len(chosed_atts)):
cur_weight = rankings_paras["ranked_atts_weight"][i]
filled_data["GeneratedScore"] += cur_weight * filled_data[chosed_atts[i]]
    filled_data = filled_data.reindex(columns=['GeneratedScore'] + [a for a in filled_data.columns if a != 'GeneratedScore'])
# save data with weight sum to a csv on server
filled_data.sort_values(by="GeneratedScore",ascending=False,inplace=True)
filled_data.to_csv(current_file+"_weightsum.csv", index=False)
# only show top_K rows in the UI
display_data = filled_data.head(top_K)
return display_data.to_json(orient='records')
def standardizeData(inputdata, columns_to_exclude=[]):
    """
    inputdata is a dataframe storing all the data read from a csv source file.
    columns_to_exclude is an array-like structure naming the attributes that should be ignored in the standardization process.
    return the input dataframe with the remaining attributes standardized (zero mean, unit variance).
    """
    df = inputdata.loc[:, inputdata.columns.difference(columns_to_exclude)]  # remove excluded attributes
    df_stand = (df - df.mean())/np.std(df)
    inputdata.loc[:, inputdata.columns.difference(columns_to_exclude)] = df_stand
    return inputdata
def normalizeDataset(input_file_name,noweightlist=[]):
"""
    input_file_name is the file name of the csv source file.
    noweightlist is an array-like structure naming the attributes that should be ignored in the normalization process.
    return the input data with the remaining attributes min-max normalized.
"""
input_data = pd.read_csv(input_file_name)
df = input_data.loc[:,input_data.columns.difference(noweightlist)] # remove no weight attributes
#normalize attributes
norm_df = (df - df.min()) / (df.max() - df.min())
input_data.loc[:,input_data.columns.difference(noweightlist)] = norm_df
return input_data
def cleanseData(input_file_name, columns_to_exclude=[]):
"""
    input_file_name is the file name of the csv source file.
    columns_to_exclude is an array-like structure naming the attributes that should be ignored.
    return the cleansed input data after normalization and standardization.
"""
norm_data = normalizeDataset(input_file_name, columns_to_exclude)
return standardizeData(norm_data, columns_to_exclude)
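# Hedged usage sketch (not part of the original module; the file name and
# column name are assumptions): normalize and then standardize every column of
# an uploaded csv except its id column.
def _example_cleanse_data():
    return cleanseData("candidates.csv", columns_to_exclude=["id"])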
class DataDescriberUI(object):
"""Analyze input dataset, then save the dataset description in a JSON file.
Used to display in datatable.
Attributes:
        threshold_size: int, when the size of input_dataset exceeds this value, only the first threshold_size rows are displayed.
dataset_description: Dict, a nested dictionary (equivalent to JSON) recording the mined dataset information.
input_dataset: the dataset to be analyzed.
"""
def __init__(self, threshold_size=100):
self.threshold_size = threshold_size
self.dataset_description = {}
self.input_dataset = pd.DataFrame()
self.json_data = {}
def read_dataset_from_csv(self, file_name=None):
try:
self.input_dataset = pd.read_csv(file_name)
except (UnicodeDecodeError, NameError):
            self.input_dataset = pd.read_csv(file_name, encoding='latin1')
import os
from functools import reduce
import numpy as np
import pandas as pd
from sklearn.preprocessing import minmax_scale, scale
class Data():
def __init__(self, no_hist_days, no_hist_weeks, target_label, root_dir="", begin_test_date=None, scale_data=None):
data_daily = os.path.join(root_dir, "data/slovenia_daily.csv")
data_weekly = os.path.join(root_dir, "data/slovenia_weekly.csv")
        self.df_daily = pd.read_csv(data_daily)
# File written by <NAME> on July 7 2020
# In reference to: https://github.com/hudson-and-thames/research/blob/master/Advances%20in%20Financial%20Machine%20Learning/Labelling/Trend-Follow-Question.ipynb
# Data from here: https://github.com/hudson-and-thames/research/blob/master/Advances%20in%20Financial%20Machine%20Learning/Labelling/sample_dollar_bars.csv
# Imports
import pandas as pd
import mlfinlab as ml
import sys
import mlfinlab.data_structures as ds
import numpy as np
import os
import datetime
import math
import sklearn as sk
from mlfinlab.datasets import (load_tick_sample, load_stock_prices, load_dollar_bar_sample)
import matplotlib.pyplot as plt
import pyfolio as pf
if __name__ == '__main__':
# Initialize Data
# Set index
csv_name = sys.argv[1]
data = pd.read_csv(csv_name)
    data['date_time'] = pd.to_datetime(data.date_time)
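    # Hedged continuation sketch (the original snippet is truncated here): index
    # the bars by their parsed timestamps before any further mlfinlab processing.
    data = data.set_index('date_time')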
import logging
import sys
import os
import requests as req
import datetime
import pandas as pd
import cartoframes
import requests
import numpy as np
### Constants
SOURCE_URL = "https://ngdc.noaa.gov/nndc/struts/results?type_0=Exact&query_0=$ID&t=101650&s=69&d=59&dfn=tsevent.txt"
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
LOG_LEVEL = logging.INFO
### Table name and structure
CARTO_TABLE = 'dis_009_tsunamis'
DATASET_ID = '2fb159b3-e613-40ec-974c-21b22c930ce4'
def lastUpdateDate(dataset, date):
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
body = {
"dataLastUpdated": date.isoformat()
}
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
###
## Accessing remote data
###
def create_geom(lat, lon):
if lat:
geom = {
"type": "Point",
"coordinates": [
lon,
lat
]
}
return geom
else:
return None
def processData():
"""
Inputs: SOURCE_URL where data is stored
Actions: Retrives data, creates date column, and returns dataframe
Output: Dataframe with data
"""
data = req.get(SOURCE_URL).text
data = data.split('\n')
lines = [line.split('\t') for line in data]
header = lines[0]
rows = lines[1:]
df = pd.DataFrame(rows)
df.columns = header
df['the_geom'] = list(map(lambda coords: create_geom(*coords), zip(df['LATITUDE'],df['LONGITUDE'])))
text_cols = ['the_geom', 'COUNTRY', 'STATE', 'LOCATION_NAME']
number_cols = [x for x in df.columns if x not in text_cols]
df = df.replace(r'^\s*$', np.nan, regex=True)
for col in number_cols:
print(col)
df[col] = pd.to_numeric(df[col], errors='coerce')
return(df)
def get_most_recent_date(table):
#year = table.sort_values(by=['YEAR', 'MONTH', 'DAY'], ascending=True)['YEAR']
#convert date values to numeric for sorting
    table.YEAR = pd.to_numeric(table.YEAR, errors='coerce')
import unittest
import pandas as pd
import numpy as np
from ..timeseries import TimeSeries
class TimeSeriesTestCase(unittest.TestCase):
times = pd.date_range('20130101', '20130110')
pd_series1 = pd.Series(range(10), index=times)
pd_series2 = pd.Series(range(5, 15), index=times)
pd_series3 = pd.Series(range(15, 25), index=times)
series1: TimeSeries = TimeSeries(pd_series1)
series2: TimeSeries = TimeSeries(pd_series1, pd_series2, pd_series3)
series3: TimeSeries = TimeSeries(pd_series2)
def test_creation(self):
with self.assertRaises(ValueError):
# Index is dateTimeIndex
TimeSeries(pd.Series(range(10), range(10)))
with self.assertRaises(ValueError):
# Conf interval must be same length as main series
pd_lo = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))
TimeSeries(self.pd_series1, pd_lo)
with self.assertRaises(ValueError):
# Conf interval must have same time index as main series
pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))
TimeSeries(self.pd_series1, pd_lo)
with self.assertRaises(ValueError):
# Conf interval must be same length as main series
pd_hi = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))
TimeSeries(self.pd_series1, None, pd_hi)
with self.assertRaises(ValueError):
# Conf interval must have same time index as main series
pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))
TimeSeries(self.pd_series1, None, pd_lo)
with self.assertRaises(ValueError):
# Main series cannot have date holes
range_ = pd.date_range('20130101', '20130104').append(pd.date_range('20130106', '20130110'))
TimeSeries(pd.Series(range(9), index=range_))
series_test = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)
self.assertTrue(series_test.pd_series().equals(self.pd_series1))
self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))
self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))
def test_alt_creation(self):
with self.assertRaises(ValueError):
            # A series must contain at least three values
index = pd.date_range('20130101', '20130102')
TimeSeries.from_times_and_values(index, self.pd_series1.values[:2])
with self.assertRaises(ValueError):
# all array must have same length
TimeSeries.from_times_and_values(self.pd_series1.index,
self.pd_series1.values[:-1],
self.pd_series2[:-2],
self.pd_series3[:-1])
# test if reordering is correct
rand_perm = np.random.permutation(range(1, 11))
index = pd.to_datetime(['201301{:02d}'.format(i) for i in rand_perm])
series_test = TimeSeries.from_times_and_values(index, self.pd_series1.values[rand_perm-1],
self.pd_series2[rand_perm-1],
self.pd_series3[rand_perm-1].tolist())
self.assertTrue(series_test.start_time() == pd.to_datetime('20130101'))
self.assertTrue(series_test.end_time() == pd.to_datetime('20130110'))
self.assertTrue(series_test.pd_series().equals(self.pd_series1))
self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))
self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))
self.assertTrue(series_test.freq() == self.series1.freq())
# TODO test over to_dataframe when multiple features choice is decided
def test_eq(self):
seriesA: TimeSeries = TimeSeries(self.pd_series1)
self.assertTrue(self.series1 == seriesA)
# with a defined CI
seriesB: TimeSeries = TimeSeries(self.pd_series1,
confidence_hi=pd.Series(range(10, 20),
index=pd.date_range('20130101', '20130110')))
self.assertFalse(self.series1 == seriesB)
self.assertTrue(self.series1 != seriesB)
# with different dates
seriesC = TimeSeries(pd.Series(range(10), index=pd.date_range('20130102', '20130111')))
self.assertFalse(self.series1 == seriesC)
# compare with both CI
seriesD: TimeSeries = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)
seriesE: TimeSeries = TimeSeries(self.pd_series1, self.pd_series3, self.pd_series2)
self.assertTrue(self.series2 == seriesD)
self.assertFalse(self.series2 == seriesE)
def test_dates(self):
self.assertEqual(self.series1.start_time(), pd.Timestamp('20130101'))
self.assertEqual(self.series1.end_time(), pd.Timestamp('20130110'))
self.assertEqual(self.series1.duration(), pd.Timedelta(days=9))
def test_slice(self):
# base case
seriesA = self.series1.slice(pd.Timestamp('20130104'), pd.Timestamp('20130107'))
self.assertEqual(seriesA.start_time(), pd.Timestamp('20130104'))
self.assertEqual(seriesA.end_time(), pd.Timestamp('20130107'))
# time stamp not in series
seriesB = self.series1.slice(pd.Timestamp('20130104 12:00:00'), pd.Timestamp('20130107'))
self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105'))
self.assertEqual(seriesB.end_time(), pd.Timestamp('20130107'))
# end timestamp after series
seriesC = self.series1.slice(pd.Timestamp('20130108'), pd.Timestamp('20130201'))
self.assertEqual(seriesC.start_time(), pd.Timestamp('20130108'))
self.assertEqual(seriesC.end_time(), pd.Timestamp('20130110'))
# n points, base case
seriesD = self.series1.slice_n_points_after(pd.Timestamp('20130102'), n=3)
self.assertEqual(seriesD.start_time(), pd.Timestamp('20130102'))
self.assertTrue(len(seriesD.values()) == 3)
self.assertEqual(seriesD.end_time(), pd.Timestamp('20130104'))
seriesE = self.series1.slice_n_points_after(pd.Timestamp('20130107 12:00:10'), n=10)
self.assertEqual(seriesE.start_time(), pd.Timestamp('20130108'))
self.assertEqual(seriesE.end_time(), pd.Timestamp('20130110'))
seriesF = self.series1.slice_n_points_before(pd.Timestamp('20130105'), n=3)
self.assertEqual(seriesF.end_time(), pd.Timestamp('20130105'))
self.assertTrue(len(seriesF.values()) == 3)
self.assertEqual(seriesF.start_time(), pd.Timestamp('20130103'))
seriesG = self.series1.slice_n_points_before(pd.Timestamp('20130107 12:00:10'), n=10)
self.assertEqual(seriesG.start_time(), pd.Timestamp('20130101'))
self.assertEqual(seriesG.end_time(), pd.Timestamp('20130107'))
# with CI
seriesH = self.series2.slice(pd.Timestamp('20130104'), pd.Timestamp('20130107'))
self.assertEqual(seriesH.conf_lo_pd_series().index[0], pd.Timestamp('20130104'))
self.assertEqual(seriesH.conf_lo_pd_series().index[-1], pd.Timestamp('20130107'))
self.assertEqual(seriesH.conf_hi_pd_series().index[0], pd.Timestamp('20130104'))
self.assertEqual(seriesH.conf_hi_pd_series().index[-1], pd.Timestamp('20130107'))
def test_split(self):
seriesA, seriesB = self.series1.split_after(pd.Timestamp('20130104'))
self.assertEqual(seriesA.end_time(), pd.Timestamp('20130104'))
self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105'))
seriesC, seriesD = self.series1.split_before(pd.Timestamp('20130104'))
self.assertEqual(seriesC.end_time(), pd.Timestamp('20130103'))
self.assertEqual(seriesD.start_time(), pd.Timestamp('20130104'))
self.assertEqual(self.series1.freq_str(), seriesA.freq_str())
self.assertEqual(self.series1.freq_str(), seriesC.freq_str())
def test_drop(self):
seriesA = self.series1.drop_after(pd.Timestamp('20130105'))
self.assertEqual(seriesA.end_time(), pd.Timestamp('20130105') - self.series1.freq())
self.assertTrue(np.all(seriesA.time_index() < pd.Timestamp('20130105')))
seriesB = self.series1.drop_before(pd.Timestamp('20130105'))
self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105') + self.series1.freq())
self.assertTrue(np.all(seriesB.time_index() > pd.Timestamp('20130105')))
self.assertEqual(self.series1.freq_str(), seriesA.freq_str())
self.assertEqual(self.series1.freq_str(), seriesB.freq_str())
def test_intersect(self):
seriesA = TimeSeries(pd.Series(range(2, 8), index=pd.date_range('20130102', '20130107')))
seriesB = self.series1.slice_intersect(seriesA)
self.assertEqual(seriesB.start_time(), pd.Timestamp('20130102'))
self.assertEqual(seriesB.end_time(), pd.Timestamp('20130107'))
# The same, with CI
seriesC = self.series2.slice_intersect(seriesA)
self.assertEqual(seriesC.conf_lo_pd_series().index[0], pd.Timestamp('20130102'))
self.assertEqual(seriesC.conf_hi_pd_series().index[-1], pd.Timestamp('20130107'))
# Outside of range
seriesD = self.series1.slice_intersect(TimeSeries(pd.Series(range(6, 13),
index=pd.date_range('20130106', '20130112'))))
self.assertEqual(seriesD.start_time(), pd.Timestamp('20130106'))
self.assertEqual(seriesD.end_time(), pd.Timestamp('20130110'))
# No intersect or too small intersect
with self.assertRaises(ValueError):
self.series1.slice_intersect(TimeSeries(pd.Series(range(6, 13),
index=pd.date_range('20130116', '20130122'))))
with self.assertRaises(ValueError):
self.series1.slice_intersect(TimeSeries(pd.Series(range(9, 13),
index=pd.date_range('20130109', '20130112'))))
def test_rescale(self):
with self.assertRaises(ValueError):
self.series1.rescale_with_value(1)
seriesA = self.series3.rescale_with_value(0)
self.assertTrue(np.all(seriesA.values() == 0))
seriesB = self.series3.rescale_with_value(-5)
self.assertTrue(self.series3 * -1. == seriesB)
seriesC = self.series3.rescale_with_value(1)
self.assertTrue(self.series3 * 0.2 == seriesC)
seriesD = self.series3.rescale_with_value(1e+20) # TODO: test will fail if value > 1e24 due to num imprecision
self.assertTrue(self.series3 * 0.2e+20 == seriesD)
def test_shift(self):
seriesA = self.series1.shift(0)
self.assertTrue(seriesA == self.series1)
seriesB = self.series1.shift(1)
self.assertTrue(seriesB.time_index().equals(
self.series1.time_index()[1:].append(pd.DatetimeIndex([self.series1.time_index()[-1] +
self.series1.freq()]))))
seriesC = self.series1.shift(-1)
self.assertTrue(seriesC.time_index().equals(
pd.DatetimeIndex([self.series1.time_index()[0] - self.series1.freq()]).append(
self.series1.time_index()[:-1])))
with self.assertRaises(OverflowError):
self.series1.shift(1e+6)
seriesM = TimeSeries.from_times_and_values(pd.date_range('20130101', '20130601', freq='m'), range(5))
with self.assertRaises(OverflowError):
seriesM.shift(1e+4)
def test_append(self):
# reconstruct series
seriesA, seriesB = self.series1.split_after(pd.Timestamp('20130106'))
self.assertEqual(seriesA.append(seriesB), self.series1)
self.assertEqual(seriesA.append(seriesB).freq(), self.series1.freq())
# Creating a gap is not allowed
seriesC = self.series1.drop_before(pd.Timestamp('20130107'))
with self.assertRaises(ValueError):
seriesA.append(seriesC)
# Changing frequence is not allowed
seriesM = TimeSeries.from_times_and_values(pd.date_range('20130107', '20130507', freq='30D'), range(5))
with self.assertRaises(ValueError):
seriesA.append(seriesM)
# reconstruction with CI
seriesD, seriesE = self.series2.split_after(pd.Timestamp('20130106'))
self.assertEqual(seriesD.append(seriesE), self.series2)
self.assertEqual(seriesD.append(seriesE).freq(), self.series2.freq())
def test_append_values(self):
# reconstruct series
        seriesA, seriesB = self.series1.split_after(pd.Timestamp('20130106'))
# -*- coding:utf-8 -*-
"""
#====#====#====#====
# Project Name: RNN-SignalProcess
# File Name: SignalProcess
# Date: 3/4/18 8:47 AM
# Using IDE: PyCharm Community Edition
# From HomePage: https://github.com/DuFanXin/RNN
# Author: DuFanXin
# BlogPage: http://blog.csdn.net/qq_30239975
# E-mail: <EMAIL>
# Copyright (c) 2018, All Rights Reserved.
#====#====#====#====
"""
import tensorflow as tf
import argparse
import os
EPOCH_NUM = 1
TRAIN_BATCH_SIZE = 128
VALIDATION_BATCH_SIZE = 256
TEST_BATCH_SIZE = 256
PREDICT_BATCH_SIZE = 1
PREDICT_SAVED_DIRECTORY = '../data_set/my_set/predictions'
EPS = 10e-5
FLAGS = None
CLASS_NUM = 4
TIME_STEP = 2600
UNITS_NUM = 128
TRAIN_SET_NAME = 'train_set.tfrecords'
VALIDATION_SET_NAME = 'validation_set.tfrecords'
TEST_SET_NAME = 'test_set.tfrecords'
PREDICT_SET_NAME = 'predict_set.tfrecords'
CHECK_POINT_PATH = '../data/History/train_2nd'
def write_data_to_tfrecords():
import numpy as np
import pandas as pd
type_to_num = {
'galaxy': 0,
'qso': 1,
'star': 2,
'unknown': 3
}
train_set_writer = tf.python_io.TFRecordWriter(os.path.join(FLAGS.data_dir, TRAIN_SET_NAME))
validation_set_writer = tf.python_io.TFRecordWriter(os.path.join(FLAGS.data_dir, VALIDATION_SET_NAME))
test_set_writer = tf.python_io.TFRecordWriter(os.path.join(FLAGS.data_dir, TEST_SET_NAME))
predict_set_writer = tf.python_io.TFRecordWriter(os.path.join(FLAGS.data_dir, PREDICT_SET_NAME))
train_set = pd.read_csv(filepath_or_buffer=os.path.join(FLAGS.data_dir, 'train_set.csv'), header=0, sep=',')
# splite_merge_csv()
# print(train_set.head())
row_num = train_set.shape[0]
for index, row in train_set.iterrows():
# print(row['id'])
train_list = np.loadtxt(
os.path.join('../data/first_train_data_20180131', '%d.txt' % row['id']), delimiter=",", skiprows=0, dtype=np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[type_to_num[row['type']]])),
'signal': tf.train.Feature(bytes_list=tf.train.BytesList(value=[train_list.tobytes()]))
}))
train_set_writer.write(example.SerializeToString()) # 序列化为字符串
if index % 100 == 0:
print('Done train_set writing %.2f%%' % (index / row_num * 100))
train_set_writer.close()
print("Done train_set writing")
validation_set = pd.read_csv(filepath_or_buffer=os.path.join(FLAGS.data_dir, 'validation_set.csv'), header=0, sep=',')
# splite_merge_csv()
# print(validation_set.head())
row_num = validation_set.shape[0]
for index, row in validation_set.iterrows():
# print(row['type'])
validation_list = np.loadtxt(
os.path.join('../data/first_train_data_20180131', '%d.txt' % row['id']), delimiter=",", skiprows=0, dtype=np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[type_to_num[row['type']]])),
'signal': tf.train.Feature(bytes_list=tf.train.BytesList(value=[validation_list.tobytes()]))
}))
validation_set_writer.write(example.SerializeToString()) # 序列化为字符串
if index % 100 == 0:
print('Done validation_set writing %.2f%%' % (index / row_num * 100))
validation_set_writer.close()
print("Done validation_set writing")
test_set = pd.read_csv(filepath_or_buffer=os.path.join(FLAGS.data_dir, 'validation_set.csv'), header=0, sep=',')
# splite_merge_csv()
# print(test_set.head())
row_num = test_set.shape[0]
for index, row in test_set.iterrows():
# print(row['type'])
test_list = np.loadtxt(
os.path.join('../data/first_train_data_20180131', '%d.txt' % row['id']), delimiter=",", skiprows=0, dtype=np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[row['id']])),
'signal': tf.train.Feature(bytes_list=tf.train.BytesList(value=[test_list.tobytes()]))
}))
test_set_writer.write(example.SerializeToString()) # 序列化为字符串
if index % 100 == 0:
print('Done test_set writing %.2f%%' % (index / row_num * 100))
test_set_writer.close()
print("Done test_set writing")
predict_set = pd.read_csv(filepath_or_buffer=os.path.join(FLAGS.data_dir, 'first_test_index_20180131.csv'), header=0, sep=',')
# splite_merge_csv()
# print(predict_set.head())
row_num = predict_set.shape[0]
for index, row in predict_set.iterrows():
# print(row['type'])
predict_list = np.loadtxt(
os.path.join('../data/first_test_data_20180131', '%d.txt' % row['id']), delimiter=",", skiprows=0,
dtype=np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[row['id']])),
'signal': tf.train.Feature(bytes_list=tf.train.BytesList(value=[predict_list.tobytes()]))
}))
predict_set_writer.write(example.SerializeToString()) # 序列化为字符串
if index % 100 == 0:
print('Done predict_set writing %.2f%%' % (index / row_num * 100))
predict_set_writer.close()
print("Done predict_set writing")
def read_image(file_queue):
reader = tf.TFRecordReader()
# key, value = reader.read(file_queue)
_, serialized_example = reader.read(file_queue)
features = tf.parse_single_example(
serialized_example,
features={
'label': tf.FixedLenFeature([], tf.int64),
'signal': tf.FixedLenFeature([], tf.string)
})
signal = tf.decode_raw(features['signal'], tf.float32)
# print('image ' + str(image))
# image = tf.reshape(image, [INPUT_IMG_WIDE, INPUT_IMG_HEIGHT, INPUT_IMG_CHANNEL])
# image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# image = tf.image.resize_images(image, (IMG_HEIGHT, IMG_WIDE))
# signal = tf.cast(features['signal'], tf.float32)
signal = tf.reshape(signal, [2600, 1])
# label = tf.decode_raw(features['label'], tf.int64)
label = tf.cast(features['label'], tf.int32)
# label = tf.reshape(label, [OUTPUT_IMG_WIDE, OUTPUT_IMG_HEIGHT])
# label = tf.decode_raw(features['image_raw'], tf.uint8)
# print(label)
# label = tf.reshape(label, shape=[1, 4])
return signal, label
def read_image_batch(file_queue, batch_size):
img, label = read_image(file_queue)
min_after_dequeue = 2000
capacity = 4000
# image_batch, label_batch = tf.train.batch([img, label], batch_size=batch_size, capacity=capacity, num_threads=10)
image_batch, label_batch = tf.train.shuffle_batch(
tensors=[img, label], batch_size=batch_size,
capacity=capacity, min_after_dequeue=min_after_dequeue)
# one_hot_labels = tf.to_float(tf.one_hot(indices=label_batch, depth=CLASS_NUM))
one_hot_labels = tf.reshape(label_batch, [batch_size])
return image_batch, one_hot_labels
def read_check_tfrecords():
train_file_path = os.path.join(FLAGS.data_dir, 'train_set.tfrecords')
train_image_filename_queue = tf.train.string_input_producer(
string_tensor=tf.train.match_filenames_once(train_file_path), num_epochs=1, shuffle=True)
# train_images, train_labels = read_image(train_image_filename_queue)
train_images, train_labels = read_image_batch(file_queue=train_image_filename_queue, batch_size=5)
# one_hot_labels = tf.to_float(tf.one_hot(indices=train_labels, depth=CLASS_NUM))
with tf.Session() as sess: # 开始一个会话
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
signal, label = sess.run([train_images, train_labels])
print(signal)
print(signal.shape)
print(label)
# print(sess.run(one_hot_labels))
coord.request_stop()
coord.join(threads)
print("Done reading and checking")
def splite_merge_csv():
import pandas as pd
df = pd.read_csv(filepath_or_buffer='../data/first_train_index_20180131.csv', header=0, sep=',')
train_set = pd.DataFrame()
validate_set = pd.DataFrame()
# print(df.head())
groups = df.groupby('type')
print(groups.count())
for name, group in groups:
if name == 'galaxy':
train_set = pd.concat([train_set, group[:5200]])
validate_set = pd.concat([validate_set, group[5200:]])
elif name == 'qso':
train_set = | pd.concat([train_set, group[:1300]]) | pandas.concat |
import talib
from datetime import datetime, tzinfo
from datetime import date
import yfinance as yf
import pandas as pd
import MetaTrader5 as mt5
import pytz
# ^BVSP
# data = yf.download("SPY", start="2020-09-01", end="2021-11-02")
# real = talib.CDLMORNINGSTAR(
# data['Open'], data['High'], data['Low'], data['Close'])
# print(real[real != 0])
if not mt5.initialize():
print("Inicialize faleid")
mt5.shutdown()
print(f"MT5 version: {mt5.__version__}")
print(f"Empresa: {mt5.__author__}")
# solicitamos 1 000 ticks de EURAUD
# euraud_ticks = mt5.copy_ticks_from("EURUSD", datetime(2021,8,28,13), 1000, mt5.COPY_TICKS_ALL)
# Obter timezone da corretora activtrade
# timezone = pytz.timezone("Europe/Luxembourg")
fusoHorario = pytz.timezone("Europe/Luxembourg")
data_atual = datetime.now()
data_hora_luxembourg = data_atual.astimezone(fusoHorario)
data_lux_tx = data_hora_luxembourg.strftime('%d/%m/%Y %H:%M')
ano= int(date.strftime(data_hora_luxembourg, '%Y'))
mes= int(date.strftime(data_hora_luxembourg, '%m'))
dia= int(date.strftime(data_hora_luxembourg, '%d'))
print(f"HOra luxemburgo: {data_lux_tx}")
# criamos o objeto datetime no fuso horário UTC para que não seja aplicado o deslocamento do fuso horário local
print(f"TimeZone Luxemburgo: {data_lux_tx}")
# dataAtual = date.today()
# mes = int(gmt.strftime('%m'))
# ano = int(gmt.strftime('%Y'))
# dia = int(gmt.strftime('%d'))
# gmtL = datetime.now(gmt)
# dl = datetime.strftime(gmt, '%Y-%d-%m %H:%M')
# print(f"DAta Luxembourg: {dl}")
# utc_from = datetime(ano, mes, dia)
# gmt_from = datetime(ano, mes, dia, tzinfo=fusoHorario)
# solicitamos 20 barras de EURUSD M5 do dia atual
rates = mt5.copy_rates_from_pos("EURUSD", mt5.TIMEFRAME_M5, 0, 200)
mt5.shutdown()
rates_frame = | pd.DataFrame(rates) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from functools import reduce
import pickle
import os
import pymssql
from virgo import market
startDate_default = '20060101'
endDate_default = (datetime.now() + timedelta(days=-1)).strftime('%Y%m%d')
# endDate_default = datetime.now().strftime('%Y%m%d')
indexTickerUnivSR_default = np.array(['000300.SH', '000016.SH', '000905.SH'])
indexTickerNameUnivSR_default = np.array(['沪深300', '上证50', '中证500'])
# Global val
conn243 = pymssql.connect(server='192.168.1.243', user="yuman.hu", password="<PASSWORD>")
conn247 = pymssql.connect(server='192.168.1.247', user="yuman.hu", password="<PASSWORD>")
# daily data download
class dailyQuant(object):
def __init__(self, startDate=startDate_default, endDate=endDate_default,
indexTickerUnivSR=indexTickerUnivSR_default, indexTickerNameUnivSR=indexTickerNameUnivSR_default):
self.startDate = startDate
self.endDate = endDate
self.rawData_path = '../data/rawData/'
self.fundamentalData_path = '../data/fundamentalData/'
self.indexTickerUnivSR = indexTickerUnivSR
self.indexTickerNameUnivSR = indexTickerNameUnivSR
self.tradingDateV, self.timeSeries = self.get_dateData()
self.tickerUnivSR, self.stockTickerUnivSR, self.tickerNameUnivSR, self.stockTickerNameUnivSR, self.tickerUnivTypeR = self.get_tickerData()
def get_dateData(self):
sql = '''
SELECT [tradingday]
FROM [Group_General].[dbo].[TradingDayList]
where tradingday>='20060101'
order by tradingday asc
'''
dateSV = pd.read_sql(sql, conn247)
tradingdays = dateSV.tradingday.unique()
tradingDateV = np.array([x.replace('-', '') for x in tradingdays])
timeSeries = pd.Series(pd.to_datetime(tradingDateV))
pd.Series(tradingDateV).to_csv(self.rawData_path+ 'tradingDateV.csv', index=False)
return tradingDateV, timeSeries
def get_tickerData(self):
# and B.[SecuAbbr] not like '%%ST%%'
# where ChangeDate>='%s'
sql = '''
SELECT A.[ChangeDate],A.[ChangeType],B.[SecuCode],B.[SecuMarket],B.[SecuAbbr]
FROM [JYDB].[dbo].[LC_ListStatus] A
inner join [JYDB].[dbo].SecuMain B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
order by SecuCode asc
'''
dataV = pd.read_sql(sql, conn243)
flagMarket = dataV.SecuMarket == 83
dataV['SecuCode'][flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SH')
dataV['SecuCode'][~flagMarket] = dataV['SecuCode'].map(lambda x: x + '.SZ')
# dataV.ChangeDate = pd.Series([x.strftime('%Y%m%d') for x in dataV.ChangeDate.values])
dataV.ChangeDate = dataV.ChangeDate.map(lambda x: x.strftime('%Y%m%d'))
flagV = np.full(len(dataV), True)
flagList = []
for i in range(len(dataV)):
if dataV.iat[i, 1] == 4:
if dataV.iat[i, 0] < self.tradingDateV[0]:
flagList.append(dataV.iat[i, 2])
for i in range(len(dataV)):
if dataV.iat[i, 2] in flagList:
flagV[i] = False
dataV = dataV[flagV]
stockTickerUnivSR = dataV.SecuCode.unique()
tickerUnivSR = np.append(self.indexTickerUnivSR, stockTickerUnivSR)
stockTickerNameUnivSR = dataV.SecuAbbr.unique()
tickerNameUnivSR = np.append(self.indexTickerNameUnivSR, stockTickerNameUnivSR)
tickerUnivTypeR = np.append(np.full(len(self.indexTickerUnivSR), 3), np.ones(len(dataV)))
pd.DataFrame(self.indexTickerUnivSR).T.to_csv(self.rawData_path+'indexTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerUnivSR).T.to_csv(self.rawData_path+'stockTickerUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivSR).T.to_csv(self.rawData_path+'tickerUnivSR.csv', header=False, index=False)
pd.DataFrame(self.indexTickerNameUnivSR).T.to_csv(self.rawData_path+'indexTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(stockTickerNameUnivSR).T.to_csv(self.rawData_path+'stockTickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerNameUnivSR).T.to_csv(self.rawData_path+'tickerNameUnivSR.csv', header=False, index=False)
pd.DataFrame(tickerUnivTypeR).T.to_csv(self.rawData_path+'tickerUnivTypeR.csv', header=False, index=False)
return tickerUnivSR, stockTickerUnivSR, tickerNameUnivSR, stockTickerNameUnivSR, tickerUnivTypeR
def __tradingData(self,tradingDay):
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_DailyQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and B.SecuCategory=1
where A.tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn243)
sql = '''
SELECT A.[TradingDay], B.[SecuMarket], B.[SecuCode], A.[PrevClosePrice],
A.[OpenPrice],A.[HighPrice],A.[LowPrice],A.[ClosePrice], A.[TurnoverVolume],A.[TurnoverValue]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataV = pd.concat([dataIndex,dataStock])
sql = '''
SELECT [TradingDay], [SecuCode], [StockReturns]
FROM [Group_General].[dbo].[DailyQuote]
where tradingday='%s'
''' % tradingDay
dataStock = pd.read_sql_query(sql, conn247)
sql = '''
SELECT A.[TradingDay], B.[SecuCode], A.[ChangePCT]
FROM [JYDB].[dbo].[QT_IndexQuote] A
inner join [JYDB].[dbo].[SecuMain] B
on A.[InnerCode]=B.[InnerCode]
and B.SecuMarket in (83,90)
and (B.SecuCode = '000300' or B.SecuCode = '000016' or B.SecuCode = '000905')
and B.SecuCategory=4
where A.tradingday='%s'
''' % tradingDay
dataIndex = pd.read_sql_query(sql, conn243)
dataIndex.ChangePCT = dataIndex.ChangePCT / 100
dataIndex = dataIndex.rename({'ChangePCT': 'StockReturns'}, axis='columns')
dataR = pd.concat([dataIndex, dataStock])
data = pd.merge(dataV,dataR)
flagMarket = data.SecuMarket==83
data['SecuCode'][flagMarket] = data['SecuCode'].map(lambda x: x + '.SH')
data['SecuCode'][~flagMarket] = data['SecuCode'].map(lambda x: x + '.SZ')
data.TradingDay = data.TradingDay.map(lambda x: x.strftime('%Y%m%d'))
preCloseM = pd.DataFrame(pd.pivot_table(data,values='PrevClosePrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
openM = pd.DataFrame(pd.pivot_table(data,values='OpenPrice',index='TradingDay',columns='SecuCode'),index=[str(tradingDay)],columns=self.tickerUnivSR)
highM = pd.DataFrame( | pd.pivot_table(data,values='HighPrice',index='TradingDay',columns='SecuCode') | pandas.pivot_table |
import pickle
#from .retrieve_marks import norm2
import os
import _pickle as cPickle
#import cache
from scipy.interpolate import interp1d
import pandas as pd
import numpy as np
import glob
import pprint
from scipy.signal import find_peaks
chromlength_human =[249250621,243199373,198022430,191154276,180915260,171115067,159138663,146364022,
141213431,135534747,135006516,133851895,115169878,107349540,102531392,90354753,81195210,78077248,
59128983,63025520,48129895,51304566]
chromlength_yeast =[230218,813184,316620,1531933,576874,270161,1090940,562643,439888,
745751,666816,1078177,924431,784333,1091291,948066]
try:
import pyBigWig
import gffutils
except:
print("You may need to install pyBigWig")
pp = pprint.PrettyPrinter(indent=2)
def smooth(ser, sc):
return np.array(pd.Series(ser).rolling(sc, min_periods=1, center=True).mean())
# string,string -> bool,[],res
# select a strain and an experimental value and return if available, the files and the resolution
# def is_available(strain, experiment):
# return True,[],1
ROOT = "../DNaseI/data/"
def nan_polate_c(A, kind="linear"):
ok = ~np.isnan(A)
x = np.arange(len(A))
f2 = interp1d(x[ok], A[ok], kind=kind, bounds_error=False)
# print(ok)
return f2(x)
def is_available_alias(strain, experiment):
alias = {"Hela": ["Hela", "HeLaS3", "Helas3"],
"Helas3": ["Helas3", "HeLaS3", "Hela"],
"GM12878": ["GM12878", "Gm12878"],
"Gm12878": ["GM12878", "Gm12878"]
}
# alias={"Helas3":["HeLaS3","Hela","Helas3"]}
if strain not in alias.keys():
avail, files, res = is_available(strain, experiment)
else:
for strain1 in alias[strain]:
avail, files, res = is_available(strain1, experiment)
if files != []:
if strain1 != strain:
print("Using alias %s" % strain1)
return avail, files, res
return avail, files, res
def is_available(strain, experiment):
avail_exp = ["MRT", "OKSeq", "OKSeqo", "DNaseI", "ORC2", "ExpGenes", "Faire", "Meth", "Meth450",
"Constant", "OKSeqF", "OKSeqR", "OKSeqS", "CNV", "NFR",
"MCM", "HMM", "GC", "Bubble","G4","G4p","G4m","Ini","ORC1","AT_20","AT_5","AT_30","RHMM","MRTstd",
"RNA_seq","MCMo","MCMp","MCM-beda","Mcm3","Mcm7","Orc2","Orc3"]
marks = ['H2az', 'H3k27ac', 'H3k27me3', 'H3k36me3', 'H3k4me1',
'H3k4me2', 'H3k4me3', 'H3k79me2', 'H3k9ac', 'H3k9me1',
'H3k9me3', 'H4k20me1', "SNS"]
marks_bw = [m + "wig" for m in marks]
Prot = ["Rad21","ORC2"]
#print("La")
if strain in ["Yeast-MCM"]:
lroot = ROOT+"/external/"+strain + "/"
resolutions = glob.glob(lroot + "/*")
#print(lroot + "/*")
resolutions = [r.split("/")[-1] for r in resolutions if "kb" in r]
#print(resolutions)
if len(resolutions) != 0:
exps = glob.glob(lroot + resolutions[0]+"/*")
files = []+exps
exps = [exp.split("/")[-1][:] for exp in exps if "csv" in exp]
print(exps)
for iexp,exp in enumerate(exps):
if exp == experiment:
return True,[files[iexp]],int(resolutions[0].replace("kb",""))
if strain in ["Cerevisae"] and experiment =="MCM-beda":
lroot = ROOT+"/external/Yeast-MCM-bedalov/"
return True,glob.glob(lroot+"/*"),0.001
if experiment not in avail_exp + marks + Prot + marks_bw:
print("Exp %s not available" % experiment)
print("Available experiments", avail_exp + marks + Prot)
return False, [], None
if experiment == "Constant":
return True, [], 1
if experiment == "MRT":
if strain == "Cerevisae":
return True, ["/home/jarbona/ifromprof/notebooks/exploratory/Yeast_wt_alvino.csv"], 1
elif strain == "Raji":
files = glob.glob(ROOT + "/external/timing_final//*_Nina_Raji_logE2Lratio_w100kbp_dw10kbp.dat" )
return True, files, 10
else:
root = ROOT + "/Data/UCSC/hsap_hg19/downloads/ENCODE/wgEncodeUwRepliSeq_V2/compute_profiles/timing_final/"
root = ROOT + "/external/timing_final/"
extract = glob.glob(root + "/*Rep1_chr10.dat")
cells = [e.split("_")[-3] for e in extract]
if strain in cells:
files = glob.glob(root + "/timing_final_W100kb_dx10kb_%s*" % strain)
return True, files, 10
if experiment == "MRTstd":
root = ROOT + "/external/Sfrac/"
extract = glob.glob(root + "/*Rep1_chr10.dat")
cells = [e.split("_")[-3] for e in extract]
if strain in cells:
files = glob.glob(root + "/Sfrac_HansenNormDenoised_W100kb_dx10kb_%s*" % strain)
return True, files, 10
if experiment == "ExpGenes":
root = ROOT + "/external/ExpressedGenes/"
extract = glob.glob(root + "/*ExpressedGenes_zero.txt")
# print(extract)
cells = [e.split("/")[-1].replace("ExpressedGenes_zero.txt", "") for e in extract]
print(cells)
if strain in cells:
files = glob.glob(root + "/%sExpressedGenes_zero.txt" % strain)
return True, files, 10
if experiment == "RNA_seq":
root = ROOT + "external//RNA_seq//"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*Tot")
# print(extract)
cells = [e.split("/")[-1].split("_")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + "_Tot/*")
files.sort()
return True, files, 1
if experiment == "NFR":
root = ROOT + "/external/NFR/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
return True, extract, 1
if experiment == "Bubble":
root = ROOT + "/external/Bubble/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bedgraph")
# print(extract)
cells = [e.split("/")[-1].split(".bedgraph")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".bedgraph")
files.sort()
return True, files, 1
#print("IRCRRRRRRRRRRRRRRRRRRRR")
if experiment == "ORC1":
#print("LA")
root = ROOT + "/external/ORC1/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
#print(extract)
cells = [e.split("/")[-1].split(".bed")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".bed")
files.sort()
return True, files,
if (experiment in ["Mcm3","Mcm7","Orc2","Orc3"]) and strain =="Raji":
return True,glob.glob(ROOT+"/external/nina_kirstein/*_"+experiment+"_G1_1kbMEAN.txt") ,1
if experiment in ["MCM","MCMp"]:
#print("LA")
if strain != "Hela":
return False,[],1
root = ROOT + "/external/MCM2-bed/R1/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.txt")
#print(extract)
return True, extract, 1
if experiment == "Ini":
root = ROOT + "/external/ini/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.csv")
# print(extract)
cells = [e.split("/")[-1].split(".csv")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".csv")
files.sort()
return True, files, 1
if experiment == "GC":
root = ROOT + "/external//1ColProfiles/*1kbp*" # chr1_gc_native_w1kbp.dat
extract = glob.glob(root)
return True, extract, 1
if "AT" in experiment:
root = ROOT + "/external//AT_hooks/c__%s.csv"%experiment.split("_")[1] # chr1_gc_native_w1kbp.dat
extract = glob.glob(root)
return True, extract, 5
if experiment == "SNS":
root = ROOT + "/external/SNS/"
# root = ROOT + "/external/1kb_profiles//"
extract = []
if strain in ["K562"]:
extract = glob.glob(root + "*.bed")
elif strain in ["HeLaS3","Hela","HeLa"]:
extract=glob.glob(root + "*.csv")
#print("Strain",strain)
#print(extract, root)
if strain not in ["K562","HeLaS3"]:
print("Wrong strain")
print("Only K562")
return False, [], 1
return True, extract, 1
if experiment == "MCMo":
if strain not in ["HeLa", "HeLaS3","Hela"]:
print("Wrong strain")
print("Only", "HeLa", "HeLaS3")
raise
root = ROOT + "/external/MCM/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
print(extract, root)
return True, extract, 1
if experiment == "MCMbw":
if strain not in ["HeLa", "HeLaS3"]:
print("Wrong strain")
print("Only", "HeLa", "HeLaS3")
raise
"""
root = ROOT + "/external/SNS/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
print(extract, root)
return True, extract, 1"""
if "G4" in experiment:
root = ROOT + "/external/G4/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
print(extract, root)
return True, extract, 1
if experiment == "CNV":
root = ROOT + "/external/CNV/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.txt")
# print(extract)
cells = [e.split("/")[-1].split(".txt")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".txt")
files.sort()
#print(files)
return True, files, 10
if experiment == "HMM":
root = ROOT + "/external/HMM/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
# print(extract)
cells = [e.split("/")[-1].replace("wgEncodeBroadHmm", "").replace("HMM.bed", "")
for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + "wgEncodeBroadHmm%sHMM.bed" % strain)
files.sort()
# print(files)
return True, files, 10
if experiment == "RHMM":
root = ROOT + "/external/RHMM/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
#print(extract)
cells = [e.split("/")[-1].replace("RHMM.bed", "")
for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + "%sRHMM.bed" % strain)
files.sort()
# print(files)
return True, files, 1
if experiment.startswith("OKSeq"):
root = ROOT + "/Data/UCSC/hsap_hg19//local/Okazaki_Hyrien/1kb_profiles/"
root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*")
cells = [e.split("/")[-1] for e in extract]
cells.sort()
# print(cells)
if strain in cells:
if experiment == "OKSeqo":
files = glob.glob(root + strain + "/*pol*")
if experiment == "OKSeqF":
files = glob.glob(root + strain + "/*F*")
if experiment == "OKSeqR":
files = glob.glob(root + strain + "/*R*")
if experiment in ["OKSeqS", "OKSeq"]:
# print("La")
files = glob.glob(root + strain + "/*R*")
files += glob.glob(root + strain + "/*F*")
files.sort()
return True, files, 1
if experiment == "DNaseI":
root = ROOT + "/external/DNaseI//"
print(root)
if strain == "Cerevisae":
return True, [root + "/yeast.dnaseI.tagCounts.bed"], 0.001
else:
extract = glob.glob(root + "/*.narrowPeak")
cells = [e.split("/")[-1].replace("wgEncodeAwgDnaseUwduke",
"").replace("UniPk.narrowPeak", "") for e in extract]
extract2 = glob.glob(root + "../DNaseIK562/*.narrowPeak")
cells2 = [e.split("/")[-1].replace("wgEncodeOpenChromDnase",
"").replace("Pk.narrowPeak", "") for e in extract2]
extract3 = glob.glob(root + "../DNaseIK562/*.bigWig")
cells3 = [e.split("/")[-1].replace("wgEncodeUwDnase",
"").replace("Rep1.bigWig", "") for e in extract3]
# print(extract2, cells2)
extract += extract2
cells += cells2
extract += extract3
cells += cells3
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 0.001
if experiment == "Meth":
root = ROOT + "/external/methylation//"
extract = glob.glob(root + "*.bed")
cells = [e.split("/")[-1].replace(".bed", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
if experiment == "Meth450":
root = ROOT + "/external/methylation450//"
extract = glob.glob(root + "*.bed")
cells = [e.split("/")[-1].replace(".bed", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
if experiment == "Faire":
root = ROOT + "/external/Faire//"
extract = glob.glob(root + "*.pk")
cells = [e.split("/")[-1].replace("UncFAIREseq.pk", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
if experiment in Prot:
root = ROOT + "/external/DNaseI//"
extract = glob.glob(root + "/*.narrowPeak")
cells = [e.split("/")[-1].replace(experiment + "narrowPeak", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
root = ROOT + "/external/proteins//"
extract = glob.glob(root + "/*.csv")
cells = [e.split("/")[-1].replace("_ORC2_miotto.csv", "") for e in extract]
if strain in cells:
files = glob.glob(root + "/%s_ORC2_miotto.csv" % strain)
return True, files, 1
if experiment in marks:
root = ROOT + "/external/histones//"
if experiment == "H2az" and strain == "IMR90":
experiment = "H2A.Z"
extract = glob.glob(root + "/*%s*.broadPeak" % experiment)
#print(extract)
if strain not in ["IMR90"]:
cells = [e.split("/")[-1].replace("wgEncodeBroadHistone",
"").replace("Std", "").replace("%sPk.broadPeak" % experiment, "") for e in extract]
# print(extract,cells)
if strain in cells:
files = glob.glob(root + "/wgEncodeBroadHistone%s%sStdPk.broadPeak" %
(strain, experiment))
files += glob.glob(root + "/wgEncodeBroadHistone%s%sPk.broadPeak" %
(strain, experiment))
return True, files, 1
else:
cells = [e.split("/")[-1].split("-")[0] for e in
extract]
# print(extract,cells)
print("Larr")
if strain in cells:
files = glob.glob(root + "/%s-%s.broadPeak" %
(strain, experiment))
return True, files, 1
if experiment[:-3] in marks:
root = ROOT + "/external/histones//"
if strain not in ["IMR90"]:
extract = glob.glob(root + "/*%s*.bigWig" % experiment[:-3])
# print(extract)
cells = []
for c in extract:
if "StdSig" in c:
cells.append(c.split("/")[-1].replace("wgEncodeBroadHistone",
"").replace("%sStdSig.bigWig" % experiment[:-3], ""))
else:
cells.append(c.split("/")[-1].replace("wgEncodeBroadHistone",
"").replace("%sSig.bigWig" % experiment[:-3], ""))
# print(extract, cells)
if strain in cells:
files = glob.glob(root + "/wgEncodeBroadHistone%s%sStdSig.bigWig" %
(strain, experiment[:-3]))
if files == []:
#print("Warning using Sig")
files = glob.glob(root + "/wgEncodeBroadHistone%s%sSig.bigWig" %
(strain, experiment[:-3]))
# print(files)
return True, files, 1
else:
exp = experiment[:-3]
exp = exp.replace("k","K") # from roadmap epi
extract = glob.glob(root + "/IMR90_%s*wh.csv" % exp)
print(extract)
cells = []
return True, extract, 1
print("Available cells")
pp.pprint(cells)
return False, [], None
def re_sample(x, y, start, end, resolution=1000):
resampled = np.zeros(int(end / resolution - start / resolution)) + np.nan
# print(data)
# print(resampled.shape)
for p, v in zip(x, y):
#print(v)
if not np.isnan(v):
posi = int((p - start) / resolution)
if np.isnan(resampled[min(posi, len(resampled) - 1)]):
resampled[min(posi, len(resampled) - 1)] = 0
resampled[min(posi, len(resampled) - 1)] += v
if int(posi) > len(resampled) + 1:
print("resample", posi, len(resampled))
# raise "Problem"
return np.arange(len(resampled)) * resolution + start, resampled
def cut_path(start, end, res=1):
initpos = 0 + start
delta = end - start
path = [0 + initpos]
def cond(x): return x <= end
while (initpos + delta) != int(initpos) and cond(initpos):
ddelta = int(initpos) + res - initpos
initpos += ddelta
ddelta -= initpos
path.append(initpos)
path[-1] = end
if len(path) >= 2 and path[-1] == path[-2]:
path.pop(-1)
return path
def overlap(start, end, res):
r = cut_path(start / res, end / res)
return [ri * res for ri in r]
def overlap_fraction(start, end, res):
assert(start <= end)
v = np.array(overlap(start, end, res))
deltas = (v[1:] - v[:-1]) / res
indexes = np.array(v[:-1] / res, dtype=np.int)
return deltas, indexes
def create_index_human(strain,exp,resolution=10,root="./"):
#chromlength = [248956422]
data = {iexp:[] for iexp in exp}
for chrom, length in enumerate(chromlength_human, 1):
for iexp in exp:
data[iexp].append(replication_data(strain, iexp,
chromosome=chrom, start=0,
end=length // 1000,
resolution=resolution)[1])
if iexp == "OKSeq":
data[iexp][-1] /= resolution
ran = [np.arange(len(dat)) * 1000 * resolution for dat in data[exp[0]]]
index = {"chrom": np.concatenate([["chr%i"%i]*len(xran) for i,xran in enumerate(ran,1)]),
"chromStart":np.concatenate(ran),
"chromEnd":np.concatenate(ran)}
print(root)
os.makedirs(root,exist_ok=True)
pd.DataFrame(index).to_csv(root+"/index.csv",index=False)
for iexp in exp:
index.update({"signalValue":np.concatenate(data[iexp])})
Df = pd.DataFrame(index)
Df.to_csv(root + "/%s.csv" % iexp, index=False)
def whole_genome(**kwargs):
data = []
def fl(name):
def sanit(z):
z = z.replace("/", "")
return z
if type(name) == dict:
items = list(name.items())
items.sort()
return "".join(["%s-%s" % (p, sanit(str(fl(value)))) for p, value in items])
else:
return name
redo = kwargs.pop("redo")
root = kwargs.get("root", "./")
# print("ic")
if "root" in kwargs.keys():
# print("la")
kwargs.pop("root")
name = root + "data/saved/"+fl(kwargs)
if os.path.exists(name) and not redo:
with open(name, "rb") as f:
return cPickle.load(f)
strain = kwargs.pop("strain")
experiment = kwargs.pop("experiment")
resolution = kwargs.pop("resolution")
for chrom, length in enumerate(chromlength_human, 1):
data.append(replication_data(strain, experiment,
chromosome=chrom, start=0,
end=length//1000,
resolution=resolution, **kwargs)[1])
if len(data[-1]) != int(length / 1000 / resolution - 0 / resolution):
print(strain, experiment, len(data[-1]),
int(length / 1000 / resolution - 0 / resolution))
raise
with open(name, "wb") as f:
cPickle.dump(data, f)
return data
def replication_data(strain, experiment, chromosome,
start, end, resolution, raw=False,
oData=False, bp=True, bpc=False, filename=None,
pad=False, smoothf=None, signame="signalValue"):
marks = ['H2az', 'H3k27ac', 'H3k27me3', 'H3k36me3', 'H3k4me1',
'H3k4me2', 'H3k4me3', 'H3k79me2', 'H3k9ac', 'H3k9me1',
'H3k9me3', 'H4k20me1']
if experiment != "" and os.path.exists(experiment):
filename = experiment
if os.path.exists(strain) and strain.endswith("csv"):
#print(strain)
data= | pd.read_csv(strain) | pandas.read_csv |
# -*- coding: utf-8 -*-
from itertools import product
import numpy as np
import pytest
import pandas.util.testing as tm
from pandas import DatetimeIndex, MultiIndex
from pandas._libs import hashtable
from pandas.compat import range, u
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(names):
mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
def test_unique_datetimelike():
idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(idx, level):
# GH #17896 - with level= argument
result = idx.unique(level=level)
expected = idx.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
@pytest.mark.parametrize('dropna', [True, False])
def test_get_unique_index(idx, dropna):
mi = idx[[0, 1, 0, 1, 1, 0, 0]]
expected = mi._shallow_copy(mi[[0, 1]])
result = mi._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
def test_duplicate_multiindex_labels():
# GH 17464
# Make sure that a MultiIndex with duplicate levels throws a ValueError
with pytest.raises(ValueError):
mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)])
# And that using set_levels with duplicate levels fails
mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'],
[1, 2, 1, 2, 3]])
with pytest.raises(ValueError):
mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2],
[1, 'a', 1]])
def test_duplicate_level_names(names):
# GH18872, GH19029
mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
assert mi.names == names
# With .rename()
mi = MultiIndex.from_product([[0, 1]] * 3)
mi = mi.rename(names)
assert mi.names == names
# With .rename(., level=)
mi.rename(names[1], level=1, inplace=True)
mi = mi.rename([names[0], names[2]], level=[0, 2])
assert mi.names == names
def test_duplicate_meta_data():
# GH 10115
mi = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [mi,
mi.set_names([None, None]),
mi.set_names([None, 'Num']),
mi.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_has_duplicates(idx, idx_dup):
# see fixtures
assert idx.is_unique is True
assert idx.has_duplicates is False
assert idx_dup.is_unique is False
assert idx_dup.has_duplicates is True
mi = MultiIndex(levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
assert mi.is_unique is False
assert mi.has_duplicates is True
def test_has_duplicates_from_tuples():
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), | u('z') | pandas.compat.u |
#!/usr/bin/env python
# coding: utf-8
import re
import requests
import pandas as pd
from bs4 import BeautifulSoup
RACE_INFO_COLUMNS = [
"race_id", # レースID
"year", # 年
"month", # 月
"day", # 日
"venue", # 開催場所
"race_number", # 何レース目
"race_name", # レース名
"course_type", # コース
"course_direction", # 左右
"course_distance", # 距離
"weather", # 天候
"course_state", # 馬場状態
]
RACE_REFUND_COLUMNS = [
"win_number", # 単勝
"win_refund",
"win_population",
"place_number", # 複勝
"place_refund",
"place_population",
"bracket_quinella_number", # 枠連
"bracket_quinella_refund",
"bracket_quinella_population",
"quinella_number", # 馬連
"quinella_refund",
"quinella_population",
"quinella_place_number", # ワイド
"quinella_place_refund",
"quinella_place_population",
"exacta_number", # 馬単
"exacta_refund",
"exacta_population",
"trio_number", # 三連複
"trio_refund",
"trio_population",
"tierce_number", # 三連単
"tierce_refund",
"tierce_population"
]
RACE_DATA_COLUMNS = [
"race_id", # レースID
"horse_id", # 馬ID
"rank", # 着順
"slot", # 枠番
"horse_num", # 馬番
"horse_name", # 馬名
"horse_gender", # 性別
"horse_age", # 年齢
"jockey_weight", # 斤量
"jockey_name", # 騎手名
"goal_time", # タイム
"last_time", # 上り
"odds", # 単勝のオッズ
"popularity", # 人気
"horse_weight", # 馬体重
"horse_weight_diff", # 馬体重の増減
"trainer", # 調教師
"prize", # 獲得賞金
"odds_place" # 複勝のオッズ
]
def get_race_ids(start_year, end_year):
years = list(range(start_year, end_year + 1))
venues = list(range(1, 11))
numbers = list(range(1, 11))
days = list(range(1, 11))
races = list(range(1, 13))
race_ids = [f"{y}{v:02}{n:02}{d:02}{r:02}" \
for y in years \
for v in venues \
for n in numbers \
for d in days \
for r in races]
return sorted(race_ids)
def get_html(race_id):
url_base = "https://db.netkeiba.com/race/"
url = url_base + str(race_id)
html = requests.get(url)
html.encoding = "EUC-JP"
soup = BeautifulSoup(html.text, "html.parser")
if soup.find_all("table", "race_table_01 nk_tb_common") == []:
return None
return soup
def get_race_info(soup, race_id):
race_date = soup.find("div", "data_intro").find("p", "smalltxt").get_text(strip=True)
race_date = re.match(r"(\d+)年(\d+)月(\d+)日.+", race_date)
venue_names = [None, "札幌", "函館", "福島", "新潟", "東京", "中山", "中京", "京都", "阪神", "小倉"]
conditions = soup.find("dl", "racedata fc").find("span")\
.get_text(strip=True).replace("\xa0", "").split("/")
course_type = ""
if "芝" in conditions[0]:
course_type += "芝"
if "ダ" in conditions[0]:
course_type += "ダ"
if "左" in conditions[0]:
course_direction = "左"
elif "右" in conditions[0]:
course_direction = "右"
elif "障" in conditions[0]:
course_direction = "障"
elif "直線" in conditions[0]:
course_direction = "直"
if course_type == "芝ダ":
states = re.match(r"芝 : (.+)ダート : (.+)", conditions[2])
course_state = states.group(1) + "/" + states.group(2)
else:
course_state = conditions[2].split(" : ")[1]
race_info = {
"race_id": [race_id],
"year": [race_date.group(1)],
"month": [race_date.group(2)],
"day": [race_date.group(3)],
"venue": [venue_names[int(race_id[4:6])]],
"race_number": [race_id[10:]],
"race_name": [soup.find("dl", "racedata fc").find("h1").get_text(strip=True)],
"course_type": [course_type],
"course_direction": [course_direction],
"course_distance": [re.match(r".+([0-9]{4})m", conditions[0]).group(1)],
"weather": [conditions[1].split(" : ")[1]],
"course_state": [course_state]
}
return race_info
def get_refunds(tables):
refunds = {}
win = tables[0].find("th", "tan") # 単勝
if win is not None:
td = win.parent.find_all("td")
refunds["win_number"] = [td[0].get_text(" ")]
refunds["win_refund"] = [td[1].get_text(" ")]
refunds["win_population"] = [td[2].get_text(" ")]
place = tables[0].find("th", "fuku".startswith("fuku")) # 複勝
if place is not None:
td = place.parent.find_all("td")
refunds["place_number"] = [td[0].get_text(" ")]
refunds["place_refund"] = [td[1].get_text(" ")]
refunds["place_population"] = [td[2].get_text(" ")]
bracket_quinella = tables[0].find("th", "waku") # 枠連
if bracket_quinella is not None:
td = bracket_quinella.parent.find_all("td")
refunds["bracket_quinella_number"] = [td[0].get_text(" ")]
refunds["bracket_quinella_refund"] = [td[1].get_text(" ")]
refunds["bracket_quinella_population"] = [td[2].get_text(" ")]
quinella = tables[0].find("th", "uren") # 馬連
if quinella is not None:
td = quinella.parent.find_all("td")
refunds["quinella_number"] = [td[0].get_text(" ")]
refunds["quinella_refund"] = [td[1].get_text(" ")]
refunds["quinella_population"] = [td[2].get_text(" ")]
quinella_place = tables[1].find("th", "wide") # ワイド
if quinella_place is not None:
td = quinella_place.parent.find_all("td")
refunds["quinella_place_number"] = [td[0].get_text(" ")]
refunds["quinella_place_refund"] = [td[1].get_text(" ")]
refunds["quinella_place_population"] = [td[2].get_text(" ")]
exacta = tables[1].find("th", "utan") # 馬単
if exacta is not None:
td = exacta.parent.find_all("td")
refunds["exacta_number"] = [td[0].get_text(" ")]
refunds["exacta_refund"] = [td[1].get_text(" ")]
refunds["exacta_population"] = [td[2].get_text(" ")]
trio = tables[1].find("th", "sanfuku") # 三連複
if trio is not None:
td = trio.parent.find_all("td")
refunds["trio_number"] = [td[0].get_text(" ")]
refunds["trio_refund"] = [td[1].get_text(" ")]
refunds["trio_population"] = [td[2].get_text(" ")]
tierce = tables[1].find("th", "santan") # 三連単
if tierce is not None:
td = tierce.parent.find_all("td")
refunds["tierce_number"] = [td[0].get_text(" ")]
refunds["tierce_refund"] = [td[1].get_text(" ")]
refunds["tierce_population"] = [td[2].get_text(" ")]
return refunds
def get_race_records(table, race_refunds, race_id):
records = {column:[] for column in RACE_DATA_COLUMNS}
for i in range(1, len(table)):
row = table[i].find_all("td")
rank = row[0].get_text(strip=True)
if not rank.isdecimal():
continue
horse_num = row[2].get_text(strip=True)
weight = re.match(r"(\d+)\((\D*\d+)\)", row[14].get_text(strip=True))
horse_weight = ""
horse_weight_diff = ""
if weight is not None:
horse_weight = weight.group(1)
horse_weight_diff = weight.group(2)
odds_place = None
place_numbers = race_refunds["place_number"][0].split(" ")
place_refunds = race_refunds["place_refund"][0].split(" ")
for i in range(len(place_numbers)):
if horse_num == place_numbers[i]:
odds_place = int(place_refunds[i].replace(",", "")) / 100
records["race_id"] += [race_id]
records["horse_id"] += [row[3].find("a").get("href").split("/")[2]]
records["rank"] += [rank]
records["slot"] += [row[1].get_text(strip=True)]
records["horse_num"] += [horse_num]
records["horse_name"] += [row[3].get_text(strip=True)]
records["horse_gender"] += [row[4].get_text(strip=True)[0]]
records["horse_age"] += [row[4].get_text(strip=True)[1:]]
records["jockey_weight"] += [row[5].get_text(strip=True)]
records["jockey_name"] += [row[6].get_text(strip=True)]
records["goal_time"] += [to_sec(row[7].get_text(strip=True))]
records["last_time"] += [row[11].get_text(strip=True)]
records["odds"] += [row[12].get_text(strip=True)]
records["popularity"] += [row[13].get_text(strip=True)]
records["horse_weight"] += [horse_weight]
records["horse_weight_diff"] += [horse_weight_diff]
records["trainer"] += [row[18].get_text(strip=True)]
records["prize"] += [row[20].get_text(strip=True)]
records["odds_place"] += [odds_place]
return | pd.DataFrame.from_dict(records) | pandas.DataFrame.from_dict |
'''
Portfolio Analysis
This module is used for portfolio analysis
which is divided into 4 steps
1. select breakpoints
2. distribute the assets into groups
3. calculate the average and difference of groups
4. present the result
'''
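# A minimal usage sketch (assumed workflow, illustrative only -- the names
# ret, size, month and factors below are hypothetical arrays):
#
#   import numpy as np
#   sample = np.c_[ret, size, month]      # [future return, characteristic, time]
#   uni = Univariate(sample, number=9)    # 9 breakpoints -> decile portfolios
#   uni.fit()                             # group means and t-tests (uni.average, uni.ttest)
#   uni.factor_adjustment(factors)        # optional: Newey-West adjusted alphas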
class ptf_analysis():
def __init__(self) :
pass
    def select_breakpoints(self, character, number, perc=None):
        '''
        select the breakpoints of the sample
        input :
            character: the characteristic used to divide the sample
            number: the number of breakpoints; the number of intervals is number+1
            perc(None): optional list of percentiles used to divide the sample;
                        once it is set, the parameter number is ignored
        output:
            breakpoint: the selected breakpoints
        '''
        import numpy as np
        # create percentiles
        if perc is None:
            perc = np.linspace(0, 100, number+2, dtype=int)
        breakpoint = np.percentile(character, perc, axis=0)
        return breakpoint
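    # Example (hypothetical): with number=9 and perc=None the percentiles are
    # np.linspace(0, 100, 11, dtype=int) = [0, 10, ..., 100], i.e. decile breakpoints.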
def distribute(self, character, breakpoint):
'''
split the character into groups
input:
character: character used to divided into groups
breakpoint: the breakpoint for dividing samples
output:
label: return a label column for character
'''
import numpy as np
r = len(character)
label = np.zeros((r, 1))
for i in range(len(breakpoint) - 1):
label[np.where((character >= breakpoint[i]) & (character < breakpoint[i+1]) & (i+1 < len(breakpoint) - 1))] = i
label[np.where((character >= breakpoint[i]) & (character <= breakpoint[i+1]) & (i+1 == len(breakpoint) - 1))] = i
return label
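    # Example (hypothetical): with decile breakpoints, an asset whose characteristic
    # lies between the 10th and 20th percentile receives label 1; the top decile
    # (including the upper endpoint) receives label 9.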
def average(self, sample_return, label, cond='uni', weight=None):
'''
calculate the average return for each group
input: sample_return: sample forecasted return
label: group label
output: average value of groups
'''
import numpy as np
if cond == 'uni' :
            # the whole set of group labels, e.g. 10 group labels: [0, 1, ..., 9]
temp_label = np.sort(np.unique(label))
# the average return of each group
average_return = np.zeros((len(temp_label), 1))
# calculate the average return of each group through matching the sample_return's
# label with the group label and the sample_return is Forecasted Return
for i in range(len(temp_label)):
if weight is None:
average_return[i, 0] = np.mean(sample_return[np.where(label==temp_label[i])])
else:
standard_weight = weight[np.where(label==temp_label[i])] / np.sum(weight[np.where(label==temp_label[i])])
average_return[i, 0] = np.sum(standard_weight * sample_return[np.where(label==temp_label[i])])
# return average value of each group
return average_return
if cond == 'bi' :
# the whole group label, eg. 10 group labels: [1,2,3,4,5,6,7,8,9,10]
temp_label_row = np.sort(np.unique(label[0]))
temp_label_col = np.sort(np.unique(label[1]))
# the average return of each group
average_return = np.zeros((len(temp_label_row), len(temp_label_col)))
# calculate the average return of each group through matching the sample_return's
# label with the group label and the sample_return is Forecasted Return
for i in range(len(temp_label_row)):
for j in range(len(temp_label_col)):
if weight is None:
average_return[i, j] = np.mean(sample_return[np.where((label[0]==temp_label_row[i])&(label[1]==temp_label_col[j]))])
else:
standard_weight = weight[np.where((label[0]==temp_label_row[i])&(label[1]==temp_label_col[j]))] / np.sum(weight[np.where((label[0]==temp_label_row[i])&(label[1]==temp_label_col[j]))])
average_return[i, j] = np.sum(standard_weight * sample_return[np.where((label[0]==temp_label_row[i])&(label[1]==temp_label_col[j]))])
# return average value of each group
return average_return
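    # Note: when weight is supplied, the group means are value-weighted, with the
    # weights normalized to sum to one within each group.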
def statistics(self, variable, label, func, cond='uni'):
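        '''
        apply a statistic function to a variable within each group
        input: variable: the variable (1d or 2d) to be summarized
               label: the group label
               func: the statistic applied to each group, e.g. np.mean
        output: average_statistics: the group-wise statistics
        '''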
import numpy as np
        # the whole set of group labels, e.g. 10 group labels: [0, 1, ..., 9]
temp_label = np.sort(np.unique(label))
# initialize average value of variable
try:
r, c = np.shape(variable)
except:
r = len(variable)
c = 0
if c == 0 :
average_statistics = np.zeros((len(temp_label)))
# calculate the average variable value of each group through matching the variable's
# label with the group label.
for i in range(len(temp_label)):
average_statistics[i] = func(variable[np.where(label==temp_label[i])])
return average_statistics
else :
average_statistics = np.zeros((len(temp_label), c))
for i in range(len(temp_label)):
for j in range(c):
average_statistics[i, j] = func(variable[np.where(label==temp_label[i]), j])
return average_statistics
class Univariate(ptf_analysis):
def __init__(self, sample, number, perc=None, maxlag=12, weight=False):
        '''
        input: sample: the sample to be analyzed
                   the sample usually contains the future return, the characteristic, and the time label;
                   the DEFAULT setting is that the first column is the forecast return,
                   the second column is the characteristic,
                   and the third column (or the index) is the time label
               number: the breakpoint number
               perc: the breakpoint percentiles
               maxlag: maximum lag for Newey-West adjustment
               weight: if True, the fourth column is used as the portfolio weight
        '''
        import numpy as np
        if type(sample).__name__ == 'DataFrame':
            self.sample = np.array(sample)
            self._sample_type = 'DataFrame'
            self._columns = list(sample.columns)
        elif type(sample).__name__ == 'ndarray':
            self.sample = sample
            self._sample_type = 'ndarray'
        else:
            raise IOError('sample must be a pandas DataFrame or a numpy ndarray')
self.number = number
if perc is not None :
self.number = len(perc) - 2
self.perc = perc
self.maxlag = maxlag
self._factor = None
self._time = np.sort(np.unique(self.sample[:, 2]))
self._weight = weight
def divide_by_time(self, sample):
'''
        split the sample by time into groups
        output: groups_by_time (list): the samples grouped by time
'''
import numpy as np
time = np.sort(np.unique(sample[:, 2]))
groups_by_time = list()
for i in range(len(time)):
groups_by_time.append(sample[np.where(sample[:, 2]==time[i])])
return groups_by_time
def average_by_time(self):
'''
average of the groups at each time point
'''
import numpy as np
        # get the sample groups by time
        groups_time = self.divide_by_time(self.sample)
        # generate the table of average returns by group and time
        # Rows: groups
        # Columns: time
average_group_time = np.zeros((self.number+1, len(groups_time)))
self._label = list()
for i in range(len(groups_time)):
# for each time, a group exist
group = groups_time[i]
# for each time, generate breakpoint
breakpoint = super().select_breakpoints(group[:, 1], self.number, self.perc)
# for each time, generate label
label = super().distribute(group[:, 1], breakpoint)
# for each group in each time, calculate the average future return
if self._weight == False:
average_group_time[:, i] = super().average(group[:, 0], label[:, 0]).reshape((self.number+1))
elif self._weight == True:
average_group_time[:, i] = super().average(group[:, 0], label[:, 0], weight=group[:, 3]).reshape((self.number+1))
            else: raise IOError('weight must be True or False')
self._label.append(label[:, 0])
# return the Table
# Rows: groups in each time
# Columns: Time
return average_group_time
def difference(self,average_group):
'''
calculate the difference group return
input : average_group : Average group at each time(MATRIX: N*T)
output: the matrix added with the difference group return
'''
import numpy as np
diff = average_group[-1, :] - average_group[0, :]
diff = diff.reshape((1, len(diff)))
result = np.append(average_group, diff, axis=0)
return result
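    # Note: the appended difference row is the last group minus the first group,
    # i.e. the high-minus-low portfolio return at each time.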
def summary_and_test(self) :
'''
summary the result and take t test
'''
import numpy as np
from scipy import stats
self.result = self.difference(self.average_by_time())
self.average = np.mean(self.result, axis=1)
self.ttest = stats.ttest_1samp(self.result, 0.0, axis=1)
# if there is a facotr adjustment, then normal return result plus the anomaly result
# if self.factor is not None :
# self.alpha, self.alpha_tvalue = self.factor_adjustment(self.result)
# return self.average, self.ttest, self.alpha, self.alpha_tvalue
return self.average, self.ttest
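    # Note: self.result has number+2 rows (the number+1 sorted groups plus the
    # difference row) and one column per time period; self.average is the time-series
    # mean of each row and self.ttest a one-sample t-test of each row against zero.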
def fit(self) :
'''
fit the model
'''
self.summary_and_test()
def factor_adjustment(self, factor):
'''
        factor adjustment
        input:  factor: factor returns ordered by time
                (the group returns in self.result, including the difference row, are adjusted)
        output: alpha: risk-adjusted return (anomaly)
                ttest: t-statistics of alpha
'''
import statsmodels.api as sm
import numpy as np
self._factor = factor
# take the inverse of the Table with difference
# Rows: Time
# Columns: Groups Return
table = self.result.T
row, col = np.shape(table)
# generate anomaly: alpha
# generate tvalues: ttest
alpha = np.zeros((col, 1))
ttest = np.zeros((col, 1))
# factor adjusment
for i in range(col):
model = sm.OLS(table[:,i], sm.add_constant(factor))
# fit the model with the Newey-West Adjusment
# lags=maxlag
re = model.fit()
re = re.get_robustcov_results(cov_type='HAC', maxlags=self.maxlag, use_correction=True)
#print(re.summary())
# get anomaly and tvalues
alpha[i] = re.params[0]
ttest[i] = re.tvalues[0]
self.alpha = alpha
self.alpha_tvalue = ttest
return alpha, ttest
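    # Example (hypothetical): after fit(), pass factor returns ordered by time
    # (e.g. the market excess return) to obtain alphas and Newey-West adjusted
    # t-statistics for each group and for the difference portfolio:
    #
    #   alpha, tvalue = uni.factor_adjustment(market_factor)   # assumed array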
def summary_statistics(self, variables=None, periodic=False):
'''
        Summary Statistics
        input : variables(ndarray/DataFrame): variables other than the sorting variable
                    to be summarized within each group
                periodic(boolean): whether to report the statistics for each period
        note  : fit() must be called first, since the group labels built in
                average_by_time() are reused here
'''
import numpy as np
from scipy import stats as sts
from prettytable import PrettyTable
'''
Group Statistics
'''
# Group Statistics
if variables is None:
groups_time = self.divide_by_time(self.sample)
average_variable_period = np.zeros((self.number+1, 1, len(groups_time)))
elif variables is not None:
try:
r, c = np.shape(variables)
except:
c = 1
temp_sample = np.c_[self.sample, variables]
groups_time = self.divide_by_time(temp_sample)
average_variable_period = np.zeros((self.number+1, c+1, len(groups_time)))
        # calculate the average of the variables within each group
for i in range(len(groups_time)):
group = groups_time[i]
if variables is None:
average_variable_period[:, 0, i] = super().statistics(group[:, 1], self._label[i], np.mean)
elif variables is not None:
average_variable_period[:, 0, i] = super().statistics(group[:, 1], self._label[i], np.mean)
average_variable_period[:, 1:, i] = super().statistics(group[:, -c:], self._label[i], np.mean)
# print the result
table = PrettyTable()
if periodic == True:
table.field_names = ['Time'] + [str(i+1) for i in range(self.number+1)]
for i in range(len(self._time)):
table.add_row([self._time[i]] + list(np.around(average_variable_period[:, 0, i], decimals=5)))
if type(variables).__name__ == 'DataFrame' or type(variables).__name__ == 'Series':
if type(variables).__name__ == 'DataFrame':
variables_col = list(variables.columns)
                elif type(variables).__name__ == 'Series':
                    variables_col = [variables.name]
for j in range(c):
table.add_row([variables_col[j]] + [' ' for i in range(self.number+1)])
for i in range(len(self._time)):
table.add_row([self._time[i]] + list(np.around(average_variable_period[:, j+1, i], decimals=5)))
elif type(variables).__name__ == 'ndarray' :
for j in range(c):
table.add_row(['Variable'+str(j+1)]+[' ' for i in range(self.number+1)])
for i in range(len(self._time)):
table.add_row([self._time[i]] + list(np.around(average_variable_period[:, j+1, i], decimals=5)))
elif periodic == False:
average_variable = np.mean(average_variable_period, axis=2)
table.field_names = ['Variable'] + [str(i+1) for i in range(self.number+1)]
if self._sample_type == 'DataFrame':
table.add_row([self._columns[1]] + list(np.around(average_variable[:, 0], decimals=5)))
else: table.add_row(['Sort Variable'] + list(np.around(average_variable[:, 0], decimals=5)))
if type(variables).__name__ == 'DataFrame' or type(variables).__name__ == 'Series':
if type(variables).__name__ == 'DataFrame':
variables_col = list(variables.columns)
elif type(variables).__name__ == 'Series':
variables_col = [variables.name]
for j in range(c):
table.add_row([variables_col[j]] + list(np.around(average_variable[:, j+1], decimals=5)))
elif type(variables).__name__ == 'ndarray' :
for j in range(c):
table.add_row(['Variable'+str(j+1)] + list(np.around(average_variable[:, j+1], decimals=5)))
            elif variables is not None:
                raise IOError('variables must be a DataFrame, Series, or ndarray')
print('\nGroup Statistics')
print(table)
'''
Variable Statistics
'''
# Variable Statistics
table = PrettyTable()
if periodic == True:
table.field_names = ['Time', 'Mean', 'SD', 'Skew', 'Kurt', 'Min', 'P5', 'P25', 'Median', 'P75', 'P95', 'Max', 'n']
for i in range(len(groups_time)):
group = groups_time[i]
stats_mean = np.mean(group[:, 1])
stats_std = np.std(group[:, 1])
stats_skew = sts.skew(group[:, 1])
stats_kurt = sts.kurtosis(group[:, 1])
stats_min = np.min(group[:, 1])
stats_perc5 = np.percentile(group[:, 1] ,5)
stats_perc25 = np.percentile(group[:, 1], 25)
stats_perc50 = np.percentile(group[:, 1], 50)
stats_perc75 = np.percentile(group[:, 1], 75)
stats_perc95 = np.percentile(group[:, 1], 95)
stats_max = np.max(group[:, 1])
stats_n = len(group[:, 1])
table.add_row([self._time[i]] + list(np.around([stats_mean, stats_std, stats_skew, stats_kurt, stats_min, stats_perc5, stats_perc25, stats_perc50, stats_perc75, stats_perc95, stats_max, stats_n], decimals=5)))
if type(variables).__name__ == 'DataFrame' or type(variables).__name__ == 'Series':
if type(variables).__name__ == 'DataFrame':
variables_col = list(variables.columns)
elif type(variables).__name__ == 'Series':
                    variables_col = [variables.name]
for j in range(c):
table.add_row([variables_col[j]] + [' ' for k in range(12)])
for i in range(len(groups_time)):
group = groups_time[i]
stats_mean = np.mean(group[:, -(c-j)])
stats_std = np.std(group[:, -(c-j)])
stats_skew = sts.skew(group[:, -(c-j)])
stats_kurt = sts.kurtosis(group[:, -(c-j)])
stats_min = np.min(group[:, -(c-j)])
stats_perc5 = np.percentile(group[:, -(c-j)] ,5)
stats_perc25 = np.percentile(group[:, -(c-j)], 25)
stats_perc50 = np.percentile(group[:, -(c-j)], 50)
stats_perc75 = np.percentile(group[:, -(c-j)], 75)
stats_perc95 = np.percentile(group[:, -(c-j)], 95)
stats_max = np.max(group[:, -(c-j)])
stats_n = len(group[:, -(c-j)])
table.add_row([self._time[i]] + list(np.around([stats_mean, stats_std, stats_skew, stats_kurt, stats_min, stats_perc5, stats_perc25, stats_perc50, stats_perc75, stats_perc95, stats_max, stats_n], decimals=5)))
elif type(variables).__name__ == 'ndarray':
for j in range(c):
table.add_row(['Variable'+str(j+1)] + [' ' for k in range(12)])
for i in range(len(groups_time)):
group = groups_time[i]
stats_mean = np.mean(group[:, -(c-j)])
stats_std = np.std(group[:, -(c-j)])
stats_skew = sts.skew(group[:, -(c-j)])
stats_kurt = sts.kurtosis(group[:, -(c-j)])
stats_min = np.min(group[:, -(c-j)])
stats_perc5 = np.percentile(group[:, -(c-j)] ,5)
stats_perc25 = np.percentile(group[:, -(c-j)], 25)
stats_perc50 = np.percentile(group[:, -(c-j)], 50)
stats_perc75 = np.percentile(group[:, -(c-j)], 75)
stats_perc95 = np.percentile(group[:, -(c-j)], 95)
stats_max = np.max(group[:, -(c-j)])
stats_n = len(group[:, -(c-j)])
table.add_row([self._time[i]] + list(np.around([stats_mean, stats_std, stats_skew, stats_kurt, stats_min, stats_perc5, stats_perc25, stats_perc50, stats_perc75, stats_perc95, stats_max, stats_n], decimals=5)))
elif periodic == False:
table.field_names = ['Variable', 'Mean', 'SD', 'Skew', 'Kurt', 'Min', 'P5', 'P25', 'Median', 'P75', 'P95', 'Max', 'n']
stats_mean = np.mean(self.sample[:, 1])
stats_std = np.std(self.sample[:, 1])
stats_skew = sts.skew(self.sample[:, 1])
stats_kurt = sts.kurtosis(self.sample[:, 1])
stats_min = np.min(self.sample[:, 1])
stats_perc5 = np.percentile(self.sample[:, 1] ,5)
stats_perc25 = np.percentile(self.sample[:, 1], 25)
stats_perc50 = np.percentile(self.sample[:, 1], 50)
stats_perc75 = np.percentile(self.sample[:, 1], 75)
stats_perc95 = np.percentile(self.sample[:, 1], 95)
stats_max = np.max(self.sample[:, 1])
stats_n = int(len(self.sample[:, 1]) / len(self._time))
if self._sample_type == 'DataFrame':
table.add_row([self._columns[1]] + list(np.around([stats_mean, stats_std, stats_skew, stats_kurt, stats_min, stats_perc5, stats_perc25, stats_perc50, stats_perc75, stats_perc95, stats_max, stats_n], decimals=5)))
else:
table.add_row(['Sort Variable'] + list(np.around([stats_mean, stats_std, stats_skew, stats_kurt, stats_min, stats_perc5, stats_perc25, stats_perc50, stats_perc75, stats_perc95, stats_max, stats_n], decimals=5)))
if type(variables).__name__ == 'DataFrame' or type(variables).__name__ == 'Series':
if type(variables).__name__ == 'DataFrame':
variables_col = list(variables.columns)
elif type(variables).__name__ == 'Series':
variables_col = list([variables.name])
stats_mean = np.mean(temp_sample[:, -c:], axis=0)
stats_std = np.std(temp_sample[:, -c:], axis=0, dtype=float)
if c > 1:
stats_skew = sts.skew(variables.iloc[:, -c:], axis=0)
elif c == 1:
stats_skew = [sts.skew(variables)]
stats_kurt = sts.kurtosis(temp_sample[:, -c:], axis=0)
stats_min = np.min(temp_sample[:, -c:], axis=0)
stats_perc5 = np.percentile(temp_sample[:, -c:], 5, axis=0)
stats_perc25 = np.percentile(temp_sample[:, -c:], 25, axis=0)
stats_perc50 = np.percentile(temp_sample[:, -c:], 50, axis=0)
stats_perc75 = np.percentile(temp_sample[:, -c:], 75, axis=0)
stats_perc95 = np.percentile(temp_sample[:, -c:], 95, axis=0)
stats_max = np.max(temp_sample[:, -c:], axis=0)
stats_n = int(len(temp_sample[:, 1]) / len(self._time))
for j in range(c):
table.add_row([variables_col[j]] + list(np.around([stats_mean[j], stats_std[j], stats_skew[j], stats_kurt[j], stats_min[j], stats_perc5[j], stats_perc25[j], stats_perc50[j], stats_perc75[j], stats_perc95[j], stats_max[j], stats_n], decimals=5)))
elif type(variables).__name__ == 'ndarray':
stats_mean = np.mean(temp_sample[:, -c:], axis=0)
stats_std = np.std(temp_sample[:, -c:], axis=0, dtype=float)
if c > 1:
stats_skew = sts.skew(variables[:, -c:], axis=0)
elif c == 1:
stats_skew = [sts.skew(variables)]
stats_kurt = sts.kurtosis(temp_sample[:, -c:], axis=0)
stats_min = np.min(temp_sample[:, -c:], axis=0)
stats_perc5 = np.percentile(temp_sample[:, -c:], 5, axis=0)
stats_perc25 = np.percentile(temp_sample[:, -c:], 25, axis=0)
stats_perc50 = np.percentile(temp_sample[:, -c:], 50, axis=0)
stats_perc75 = np.percentile(temp_sample[:, -c:], 75, axis=0)
stats_perc95 = np.percentile(temp_sample[:, -c:], 95, axis=0)
stats_max = np.max(temp_sample[:, -c:], axis=0)
stats_n = int(len(temp_sample[:, 1]) / len(self._time))
for j in range(c):
table.add_row(['Variable'+str(j+1)] + list(np.around([stats_mean[j], stats_std[j], stats_skew[j], stats_kurt[j], stats_min[j], stats_perc5[j], stats_perc25[j], stats_perc50[j], stats_perc75[j], stats_perc95[j], stats_max[j], stats_n], decimals=5)))
else:
                raise IOError('variables must be a DataFrame, Series, or ndarray')
# print the result
print('\nIndicator Statistics')
print(table)
def correlation(self, variables, periodic=False, export=False):
# Variable Statistics
# input:
# variables (ndarray\DataFrame)
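        # Illustrative usage (a sketch; `ptf` is an instance of this class and `extra` is
        # assumed to hold one column per extra variable, one row per observation):
        #     corr_df = ptf.correlation(extra, periodic=False, export=True)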
import numpy as np
from prettytable import PrettyTable
from scipy import stats as sts
r, c = np.shape(variables)
temp_sample = np.c_[self.sample, variables]
groups_time = self.divide_by_time(temp_sample)
table = PrettyTable()
table_spear = PrettyTable()
# create field name
if type(variables).__name__ == 'DataFrame':
variables_col = list(variables.columns)
field_name = list()
for i in range(len(variables_col)):
for j in range(len(variables_col)):
if j > i:
field_name.append(variables_col[i]+'&'+variables_col[j])
elif type(variables).__name__ == 'ndarray':
r , c =np.shape(variables)
variables_col = ['Variable_' + str(i) for i in range(c)]
field_name = list()
for i in range(len(variables_col)):
for j in range(len(variables_col)):
if j > i :
field_name.append(variables_col[i]+' & '+variables_col[j])
if periodic == True:
table.field_names = ['Time'] + field_name
table_spear.field_names = ['Time'] + field_name
elif periodic == False:
table.field_names = ['Variable'] + field_name
# calculate correlation coefficient
corr_maxtrix = np.zeros((len(groups_time), len(field_name)))
corr_maxtrix_spearman = np.zeros((len(groups_time), len(field_name)))
for i in range(len(groups_time)):
group = groups_time[i]
temp_variables = group[:, -c:]
corr = list()
corr_spearman = list()
for j in range(c):
for k in range(c):
if k > j :
corr.append(np.around(sts.pearsonr(temp_variables[:, j], temp_variables[:, k])[0], decimals=5))
corr_spearman.append(np.around(sts.spearmanr(temp_variables[:, j], temp_variables[:, k])[0], decimals=5))
corr_maxtrix[i, :] = corr
corr_maxtrix_spearman[i, :] = corr_spearman
if periodic == True:
table.add_row([str(self._time[i]) + ' '] + corr)
table_spear.add_row([str(self._time[i]) + ' '] + corr_spearman)
if periodic == False:
table.add_rows([['Pearson'] + list(np.around(np.mean(corr_maxtrix, axis=0), decimals=5))])
table.add_row(['Spearman'] + list(np.around(np.mean(corr_maxtrix_spearman, axis=0), decimals=5)))
print(table)
if export == True :
import pandas as pd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
csv_string = table.get_csv_string()
with StringIO(csv_string) as f:
df = pd.read_csv(f)
return df
elif periodic == True:
print('Spearman Correlation')
print(table_spear)
print('Pearson Correlation')
print(table)
if export == True :
import pandas as pd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
csv_string = table.get_csv_string()
csv_string_spear = table_spear.get_csv_string()
with StringIO(csv_string) as f:
df = pd.read_csv(f)
with StringIO(csv_string_spear) as f_spear:
df_spear = pd.read_csv(f_spear)
return df, df_spear
def print_summary_by_time(self, export=False) :
'''
print summary_by_time
'''
import numpy as np
from prettytable import PrettyTable
r, c = np.shape(self.result)
table = PrettyTable()
time = np.sort(np.unique(self.sample[:, 2]))
table.add_column('Time', time)
for i in range(r-1):
table.add_column(str(i+1), np.around(self.result[i, :], decimals=3))
table.add_column('diff', np.around(self.result[r-1,:], decimals=3))
print(table)
if export == True :
import pandas as pd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
csv_string = table.get_csv_string()
with StringIO(csv_string) as f:
df = pd.read_csv(f)
return df
def print_summary(self, export=False):
'''
print summary
'''
import numpy as np
from prettytable import PrettyTable
# generate Table if no factor
table = PrettyTable()
table.add_column('Group', ['Average', 'T-Test'])
for i in range(self.number+1):
table.add_column(str(i+1), np.around([self.average[i], self.ttest[0][i]], decimals=3))
table.add_column('Diff', np.around([self.average[-1], self.ttest[0][-1]], decimals=3))
if self._factor is not None :
table = PrettyTable()
table.add_column('Group', ['Average', 'T-Test', 'Alpha', 'Alpha-T'])
for i in range(self.number+1):
table.add_column(str(i+1), np.around([self.average[i], self.ttest[0][i], self.alpha[i][0], self.alpha_tvalue[i][0]], decimals=3))
table.add_column('Diff', np.around([self.average[-1], self.ttest[0][-1], self.alpha[-1][0], self.alpha_tvalue[-1][0]], decimals=3))
np.set_printoptions(formatter={'float':'{:0.3f}'.format})
print(table)
if export == True :
import pandas as pd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
csv_string = table.get_csv_string()
with StringIO(csv_string) as f:
df = pd.read_csv(f)
return df
class Bivariate(ptf_analysis):
def __init__(self, sample, number=5, perc_row=None, perc_col=None, weight=False, maxlag=12):
'''
        input: sample: the sample to be analyzed
            the sample usually contains the future return, the characteristics and the time label
            the DEFAULT setting:
            the first column is the forecast return,
            the second column is the row characteristic,
            the third column is the column characteristic,
            the fourth column (or the index) is the time label
            number: the number of breakpoints
            perc_row, perc_col: the breakpoint percentiles for the row and column characteristics
            weight: if True, the last column of sample is used as the weight for value-weighted averages
            maxlag: maximum lag for the Newey-West adjustment
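        Example (an illustrative sketch; the sample data itself is an assumption):
            # sample columns: [future_return, row_char, col_char, time_label]
            bi = Bivariate(sample, number=5, maxlag=12)
            bi.fit()
            bi.print_summary()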
'''
import numpy as np
if type(sample).__name__ == 'DataFrame' :
self.sample = np.array(sample)
self._sample_type = 'DataFrame'
self._columns = list(sample.columns)
elif type(sample).__name__ == 'ndarray' :
self.sample = sample
self._sample_type = 'ndarray'
else:
            raise IOError('sample must be a pandas DataFrame or a numpy ndarray')
self.number = number
self._factor = None
self.maxlag = maxlag
self.weight = weight
self.perc_row = perc_row
self.perc_col = perc_col
if (perc_row is not None) and (perc_col is not None):
self.perc_sign = True
else:
self.perc_sign = False
def divide_by_time(self):
'''
        split the sample into groups by time
        output: groups_by_time (list) : the sample grouped by time
'''
import numpy as np
time=np.sort(np.unique(self.sample[:, 3]))
groups_by_time=list()
for i in range(len(time)):
groups_by_time.append(self.sample[np.where(self.sample[:, 3]==time[i])])
return groups_by_time
def average_by_time(self, conditional=False):
'''
average of the groups at each time point
'''
import numpy as np
        # get the sample groups by time
groups_time = self.divide_by_time()
        # generate a table of average returns by group and time
        # Rows: groups
        # Columns: Time
if self.perc_sign == False:
average_group_time = np.zeros((self.number+1, self.number+1, len(groups_time)))
elif self.perc_sign == True :
average_group_time = np.zeros((len(self.perc_row)-1, len(self.perc_col)-1, len(groups_time)))
for i in range(len(groups_time)):
# for each time, there exists a group
group = groups_time[i]
# for each time, generate breakpoint
breakpoint_row = super().select_breakpoints(group[:, 1], self.number, self.perc_row)
# for each time, generate label
label_row = super().distribute(group[:, 1], breakpoint_row)[:, 0]
if conditional == False:
breakpoint_col = super().select_breakpoints(group[:, 2], self.number, self.perc_col)
label_col = super().distribute(group[:, 2], breakpoint_col)[:, 0]
elif conditional == True:
label_row_unique = list(np.unique(label_row))
label_col = - np.ones(len(group[:, 2]))
for j in range(len(label_row_unique)):
breakpoint_col = super().select_breakpoints(group[:, 2][np.where(label_row==label_row_unique[j])], self.number, self.perc_col)
label_col[np.where(label_row==label_row_unique[j])] = super().distribute(group[:, 2][np.where(label_row==label_row_unique[j])], breakpoint_col)[:, 0]
# for each group in each time, calculate the average future return
label = [label_row, label_col]
if self.perc_sign == False:
if self.weight == False:
average_group_time[:,:,i] = super().average(group[:, 0], label, cond='bi').reshape((self.number+1, self.number+1))
else:
average_group_time[:,:,i] = super().average(group[:, 0], label, cond='bi', weight=group[:, -1]).reshape((self.number+1, self.number+1))
elif self.perc_sign == True:
if self.weight == False:
average_group_time[:,:,i] = super().average(group[:, 0], label, cond='bi').reshape((len(self.perc_row)-1, len(self.perc_col)-1))
else:
average_group_time[:,:,i] = super().average(group[:, 0], label, cond='bi', weight=group[:, -1]).reshape((len(self.perc_row)-1, len(self.perc_col)-1))
# return the Table
# Rows: groups in each time
# Columns: Time
return average_group_time
def difference(self, average_group):
'''
calculate the difference group return
input : average_group : Average group at each time(MATRIX: N*T)
output: the matrix added with the difference group return
'''
import numpy as np
a, b, c= np.shape(average_group)
diff_row = average_group[-1, :, :] - average_group[0, :, :]
diff_row = diff_row.reshape((1, b, c))
result = np.append(average_group, diff_row, axis=0)
diff_col = result[:, -1, :] - result[:, 0, :]
diff_col = diff_col.reshape((a+1, 1, c))
result = np.append(result, diff_col, axis=1)
return result
def factor_adjustment(self, factor):
'''
        factor adjustment
        input:  result: return table with the difference sequence
                factor: factor returns ordered by time
        output: alpha : abnormal return (alpha)
                ttest : t-statistics
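        Example (an illustrative sketch; `bi` is a fitted Bivariate instance and `factor` is
        assumed to be a (T x k) array of factor returns aligned with the sample's time periods):
            alpha, tvalues = bi.factor_adjustment(factor)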
'''
import statsmodels.api as sm
import numpy as np
self._factor = factor
# result: r * c * n
r, c, n = np.shape(self.result)
# generate anomaly: alpha
# generate tvalues: ttest
alpha = np.zeros((r, c))
ttest = np.zeros((r, c))
# factor adjusment
for i in range(r):
for j in range(c):
model = sm.OLS(self.result[i, j, :], sm.add_constant(factor))
# fit the model with the Newey-West Adjusment
# lags=maxlag
re = model.fit()
re = re.get_robustcov_results(cov_type='HAC', maxlags=self.maxlag, use_correction=True)
#print(re.summary())
# get anomaly and tvalues
alpha[i, j] = re.params[0]
ttest[i, j] = re.tvalues[0]
self.alpha = alpha
self.alpha_tvalue = ttest
return alpha, ttest
def summary_and_test(self, **kwargs) :
'''
summary the result and take t test
'''
import numpy as np
from scipy import stats
self.result = self.difference(self.average_by_time(**kwargs))
self.average = np.mean(self.result, axis=2)
self.ttest = stats.ttest_1samp(self.result, 0.0, axis=2)
        # if there is a factor adjustment, then the normal return result plus the anomaly result
# if self.factor is not None :
# self.alpha, self.alpha_tvalue = self.factor_adjustment(self.result)
# return self.average, self.ttest, self.alpha, self.alpha_tvalue
return self.average,self.ttest
def fit(self, **kwargs):
self.summary_and_test(**kwargs)
def print_summary_by_time(self, export=False) :
'''
print summary_by_time
'''
import numpy as np
from prettytable import PrettyTable
r, c, n = np.shape(self.result)
table = PrettyTable()
time = np.sort(np.unique(self.sample[:, 3]))
table.field_names = ['Time', 'Group'] + [str(i+1) for i in range(self.number+1)] + ['Diff']
for i in range(n):
for j in range(r):
if j == 0 :
temp = [time[i], j+1]
elif j == r - 1 :
temp = [' ', 'Diff']
else :
temp = [' ', j+1]
for k in range(c):
temp.append(np.round(self.result[j, k, i], decimals=3))
table.add_row(temp)
print(table)
if export == True :
import pandas as pd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
csv_string = table.get_csv_string()
with StringIO(csv_string) as f:
df = pd.read_csv(f)
return df
def print_summary(self, export=False):
'''
print summary
'''
import numpy as np
from prettytable import PrettyTable
# generate Table if no factor
if self._factor is None :
table=PrettyTable()
if self._sample_type == 'ndarray':
                table.field_names = ['Group'] + [str(i+1) for i in range(self.number+1)] + ['Diff']
elif self._sample_type == 'DataFrame':
table.field_names = ['Group'] + [self._columns[2] + str(i+1) for i in range(self.number+1)] + ['Diff']
for i in range(self.number+2):
if i == self.number+1 :
temp = ['Diff']
temp_tvalue = [' ']
else:
if self._sample_type == 'ndarray':
temp = [str(i+1)]
elif self._sample_type == 'DataFrame':
temp = [self._columns[1] + str(i+1)]
temp_tvalue = [' ']
for j in range(self.number+2):
temp.append(np.around(self.average[i, j], decimals=3))
temp_tvalue.append(np.around(self.ttest[0][i, j], decimals=3))
table.add_row(temp)
table.add_row(temp_tvalue)
elif self._factor is not None :
table = PrettyTable()
if self._sample_type == 'ndarray':
                table.field_names = ['Group'] + [str(i+1) for i in range(self.number+1)] + ['Diff']
elif self._sample_type == 'DataFrame':
table.field_names = ['Group'] + [self._columns[2] + str(i+1) for i in range(self.number+1)] + ['Diff']
for i in range(self.number+2):
if i == self.number+1:
temp = ['Diff']
temp_tvalue = [' ']
temp_fac = ['alpha']
temp_fac_tvalue = [' ']
else :
if self._sample_type == 'ndarray':
temp = [str(i+1)]
elif self._sample_type == 'DataFrame':
temp = [self._columns[1] + str(i+1)]
temp_tvalue = [' ']
temp_fac = ['alpha']
temp_fac_tvalue = [' ']
for j in range(self.number+2):
temp.append(np.around(self.average[i, j], decimals=3))
temp_tvalue.append(np.around(self.ttest[0][i, j], decimals=3))
temp_fac.append(np.around(self.alpha[i, j], decimals=3))
temp_fac_tvalue.append(np.around(self.alpha_tvalue[i, j], decimals=3))
table.add_row(temp)
table.add_row(temp_tvalue)
table.add_row(temp_fac)
table.add_row(temp_fac_tvalue)
np.set_printoptions(formatter={'float':'{:0.3f}'.format})
print(table)
if export == True :
import pandas as pd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
csv_string = table.get_csv_string()
with StringIO(csv_string) as f:
df = pd.read_csv(f)
return df
class Persistence():
'''
This class is for persistence analysis
'''
def __init__(self, sample):
'''
Input :
sample (DataFrame):
first column : sample number
second column : timestamp
variables: other columns
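        Example (an illustrative sketch; the column names are assumptions):
            per = Persistence(sample)      # sample columns: [asset_id, timestamp, char1, char2, ...]
            per.fit(lags=[1, 3, 6, 12])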
'''
import numpy as np
self.sample = sample
self._columns = sample.columns
self._r, self._c = np.shape(self.sample)
def _shift(self, series, lag):
'''
Input :
series, lags
'''
lag_series = series.groupby([self._columns[0]]).shift(-lag)
lag_series.name = series.name + str(lag)
return lag_series
def fit(self, lags):
import pandas as pd
temp_sample = self.sample.set_index([self._columns[0], self._columns[1]]).sort_index()
def autocorr(x):
import numpy as np
return np.corrcoef(x.iloc[:, 0], x.iloc[:, 1])[0, 1]
variable_autocorr = list()
for lag_num in range(len(lags)):
for i in range(self._c-2):
temp_shift = self._shift(temp_sample.iloc[:, i], lags[lag_num])
temp_merge = pd.merge(temp_sample.iloc[:, i], temp_shift, left_index=True, right_index=True).dropna()
                temp_autocorr = temp_merge.groupby([self._columns[1]])[[self._columns[i+2], temp_shift.name]].apply(autocorr)
variable_autocorr.append(temp_autocorr)
self._lag = lags
        self._variable_autocorr = pd.concat(variable_autocorr, axis=1)
import pandas as pd
from milvus import Milvus, IndexType, MetricType
from ipykernel.kernelbase import Kernel
__version__ = '0.2.0'
class MilvusKernel(Kernel):
implementation = 'milvus_kernel'
implementation_version = __version__
language = 'sql'
language_version = 'latest'
language_info = {'name': 'sql',
'mimetype': 'text/x-sh',
'file_extension': '.sql'}
banner = 'milvus kernel'
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
self.engine = False
def output(self, output):
if not self.silent:
display_content = {'source': 'kernel',
'data': {'text/html': output},
'metadata': {}}
self.send_response(self.iopub_socket, 'display_data', display_content)
def ok(self):
return {'status':'ok', 'execution_count':self.execution_count, 'payload':[], 'user_expressions':{}}
def err(self, msg):
return {'status':'error',
'error':msg,
'traceback':[msg],
'execution_count':self.execution_count,
'payload':[],
'user_expressions':{}}
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
self.silent = silent
message = 'Unable to connect to Milvus server. Check that the server is running.'
output = ''
if not code.strip():
return self.ok()
sql = code.rstrip()+('' if code.rstrip().endswith(";") else ';')
try:
for v in sql.split(";"):
v = v.rstrip()
l = v.lower()
if len(l)>0:
if l.startswith('milvus://'):
self.engine = Milvus(uri=f'tcp://{v[9:]}')
elif l=='help':
output = pd.DataFrame(
{'description':[
'Create a collection',
'Drop a collection',
'Show all collections',
'Create a partition',
'Drop a partition',
'Show all partitions in a collection',
'Create an index',
'Removes an index',
'Flush data',
'Compact all segments in a collection',
'Search vectors',
'Delete vectors by ID',
'Insert a vector',
'Select vector',
'View metric type',
'View index type',
'View a collection description',
'View a collection statistics',
],
'milvus sql':[
"create table test01 where dimension=128 and index_file_size=1024 and metric_type='L2'",
'drop table test01',
'list table',
"create partition test01 where partition_tag='tag01'",
"drop partition test01 where partition_tag='tag01'",
'list partitions test01',
"create index test01 where index_type='FLAT' and nlist=4096",
'drop index test01',
'flush test01, test02',
'compact test01',
"select 2, 3, 5 from test01 where top_k=1 and partition_tags='tag01' and nprobe=16",
'delete test01 by id=1',
"insert 2,3,5 from test01 where partition_tag='tag01' by id=0",
'select test01 by id=1,2,3',
'help -metric',
'help -index',
'desc test01',
'stats test01',
]}).to_html()
elif l=='help -metric':
output = pd.DataFrame(
{'description':[
'HAMMING', 'INVALID', 'IP', 'JACCARD', 'L2', 'SUBSTRUCTURE', 'SUPERSTRUCTURE', 'TANIMOTO'
],
'milvus MetricType':[
'MetricType.HAMMING', 'MetricType.INVALID', 'MetricType.IP', 'MetricType.JACCARD', 'MetricType.L2',
'MetricType.SUBSTRUCTURE', 'MetricType.SUPERSTRUCTURE', 'MetricType.TANIMOTO'
]}).to_html()
elif l=='help -index':
output = pd.DataFrame(
{'description':[
'IVFLAT', 'ANNOY', 'FLAT', 'HNSW', 'INVALID', 'IVF_PQ', 'IVF_SQ8', 'IVF_SQ8H', 'RNSG'
],
'milvus IndexType':[
'IndexType.IVFLAT', 'IndexType.ANNOY', 'IndexType.FLAT', 'IndexType.HNSW', 'IndexType.INVALID',
'IndexType.IVF_PQ', 'IndexType.IVF_SQ8', 'IndexType.IVF_SQ8H', 'IndexType.RNSG'
]}).to_html()
elif l.startswith('desc '):
if not self.engine:
self.output(message)
return self.ok()
info_col = self.engine.get_collection_info(v[4:].strip())[1]
info_index = self.engine.get_index_info(v[4:].strip())[1]
desc = (['collection_name', 'dimension', 'index_file_size', 'metric_type', 'index_type']
+[i for i in info_index.params]+['row_count'])
info = ([info_col.collection_name, info_col.dimension, info_col.index_file_size, str(info_col.metric_type),
str(info_index.index_type)]+[info_index.params[i] for i in info_index.params]
+[self.engine.get_collection_stats(v[4:].strip())[1]['row_count']])
                    output = pd.DataFrame({'description': desc, 'info': info})
# Data handling
from io import BytesIO
from dateutil import tz
from pathlib import Path
from zipfile import ZipFile
from json import loads as loads_json
from datetime import datetime, timedelta
from requests import get as get_request
# Feature engineering
from geopandas import read_file
from pandas import DataFrame, json_normalize, read_csv, concat
# Plots
from seaborn import scatterplot
from matplotlib.lines import Line2D
from contextily import add_basemap, providers
from matplotlib.pyplot import Axes, Figure, get_cmap
# Twitter
from twython import Twython
# Model
import ecoTad, ecoPredict
class EcoBiciMap:
def __init__(self, client_id: str, client_secret: str, twitter_key: str, twitter_secret: str, access_token: str, access_secret: str, is_local: bool=True) -> None:
'''
        Defines the base directory, the base URL and the credentials for accessing the Ecobici API
        :client_id: user_uuid provided by Ecobici. More info at: https://www.ecobici.cdmx.gob.mx/sites/default/files/pdf/manual_api_opendata_esp_final.pdf
        :client_secret: password provided by Ecobici, sent in a separate email for extra security
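
        Example (an illustrative sketch; the credential values are assumptions):
            ebm = EcoBiciMap(client_id, client_secret, twitter_key, twitter_secret, access_token, access_secret, is_local=False)
            ebm.get_token(first_time=True)
            stations = ebm.get_data()
            availability = ebm.get_data(availability=True)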
'''
        # Get the working directory
if is_local: self.base_dir = Path('/Users/efrain.flores/Desktop/hub/ecobici_bot')
else: self.base_dir = Path().cwd()
self.csv_dir = self.base_dir.joinpath('data','csv')
self.shapefile_dir = self.base_dir.joinpath('data','shp')
        # Base web domain, to which routes and parameters will be appended
self.base_url = "https://pubsbapi-latam.smartbike.com"
        # Route with the access credentials
self.user_credentials = f"oauth/v2/token?client_id={client_id}&client_secret={client_secret}"
        # Store the credentials needed to create tweets as attributes
self.twitter_key = twitter_key
self.twitter_secret = twitter_secret
self.access_token = access_token
self.access_secret = access_secret
        # Date and time at which the class is instantiated
self.started_at = datetime.now().astimezone(tz.gettz('America/Mexico_City'))
self.started_at_format = self.started_at.strftime(r'%d/%b/%Y %H:%M')
self.is_local = is_local
self.eb_map = {}
def __str__(self) -> str:
return f'''
{self.started_at_format}
        Class to extract information from the Ecobici API (https://www.ecobici.cdmx.gob.mx/sites/default/files/pdf/manual_api_opendata_esp_final.pdf),
        transform it, plot availability as a heatmap, export the data and create a tweet with the map.
'''
def get_token(self, first_time: bool=False) -> None:
'''
        Stores the access tokens needed to request station information and availability
        :first_time:
            - True to obtain ACCESS_TOKEN and REFRESH_TOKEN using the credentials for the first time
            - False to keep accessing the API (after 60 min) and renew ACCESS_TOKEN via the REFRESH_TOKEN
'''
        # Full URL to receive the access token and the refresh token (used if the session lasts longer than 60 min)
if first_time:
URL = f"{self.base_url}/{self.user_credentials}&grant_type=client_credentials"
        # When the API is accessed a second time or later, the refresh token is used
else:
URL = f"{self.base_url}/{self.user_credentials}&grant_type=refresh_token&refresh_token={self.REFRESH_TOKEN}"
        # Get the response to the URL request; the payload comes back as text
req_text = get_request(URL).text
        # Parse the text as JSON to store the tokens
data = loads_json(req_text)
        # Store the tokens as attributes
self.ACCESS_TOKEN = data['access_token']
self.REFRESH_TOKEN = data['refresh_token']
def get_data(self, availability: bool=False) -> DataFrame:
'''
        Gets the station information and current availability
        :availability:
            - True to get the availability data
            - False to get the information about the stations
'''
        # URL to get the real-time information, either the station info and/or their availability
stations_url = f"{self.base_url}/api/v1/stations{'/status' if availability else ''}.json?access_token={self.ACCESS_TOKEN}"
req_text = get_request(stations_url).text
data = loads_json(req_text)
        # The resulting JSON has the data wrapped in its first key
first_key = list(data.keys())[0]
        # Structure it as a table
        df = json_normalize(data[first_key])
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ Index(right)
tm.assert_series_equal(result, expected)
result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
# GH#9016: support bitwise op for integer types
# with non-matching indexes, logical operators will cast to object
# before operating
index = list("bca")
        s_tft = Series([True, False, True], index=index)
s_tff = Series([True, False, False], index=index)
s_0123 = Series(range(4), dtype="int64")
# s_0123 will be all false now because of reindexing like s_tft
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_tft & s_0123
tm.assert_series_equal(result, expected)
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_0123 & s_tft
tm.assert_series_equal(result, expected)
s_a0b1c0 = Series([1], list("b"))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list("abc"))
tm.assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list("abc"))
tm.assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
tm.assert_series_equal(result, expected)
def test_scalar_na_logical_ops_corners_aligns(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
d = DataFrame({"A": s})
expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
result = s & d
tm.assert_frame_equal(result, expected)
result = d & s
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], dtype=bool)
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
def test_reversed_xor_with_index_returns_index(self):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Index.symmetric_difference(idx1, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx1 ^ ser
tm.assert_index_equal(result, expected)
expected = Index.symmetric_difference(idx2, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx2 ^ ser
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"op",
[
pytest.param(
ops.rand_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __and__ returns Index intersection",
raises=AssertionError,
strict=True,
),
),
pytest.param(
ops.ror_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __or__ returns Index union",
raises=AssertionError,
strict=True,
),
),
],
)
def test_reversed_logical_op_with_index_returns_series(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series(op(idx1.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series(op(idx2.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op, expected",
[
(ops.rand_, Index([False, True])),
(ops.ror_, Index([False, True])),
(ops.rxor, Index([])),
],
)
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# behaving as set ops is deprecated, will become logical ops
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list("bca"))
b = Series([False, True, False], list("abc"))
expected = Series([False, True, False], list("abc"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False], list("abc"))
result = a | b
tm.assert_series_equal(result, expected)
expected = Series([True, False, False], list("abc"))
result = a ^ b
tm.assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list("bca"))
b = Series([False, True, False, True], list("abcd"))
expected = Series([False, True, False, False], list("abcd"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False, False], list("abcd"))
result = a | b
tm.assert_series_equal(result, expected)
# filling
# vs empty
empty = Series([], dtype=object)
result = a & empty.copy()
expected = Series([False, False, False], list("bca"))
tm.assert_series_equal(result, expected)
result = a | empty.copy()
expected = Series([True, False, True], list("bca"))
tm.assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ["z"])
expected = Series([False, False, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
result = a | Series([1], ["z"])
expected = Series([True, True, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [
empty.copy(),
Series([1], ["z"]),
Series(np.nan, b.index),
            Series(np.nan, a.index),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ~~aliGater~~
# (semi)automated gating software
#
# /^^\
# /^^\_______/0 \_
# ( `~+++,,_________,,++~^^^^^^^
#..V^V^V^V^V^V^\.................................
#
#
# Parsing .fcs files with fcsparser from Eugene Yurtsevs FlowCytometryTools (very slightly modified)
# Check out his toolkit for flow cytometry analysis:
# http://eyurtsev.github.io/FlowCytometryTools/
#
# <NAME> & <NAME> 2016~
# https://www.med.lu.se/labmed/hematologi_och_transfusionsmedicin/forskning/bjoern_nilsson
# Distributed under the MIT License
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.patches import Ellipse, Arrow
from matplotlib.ticker import Locator, Formatter
from matplotlib import transforms as mtransforms
from matplotlib import rcParams
import math
import six
from scipy.ndimage.filters import gaussian_filter1d
#For computing bin width similarly to scipys histogram_bin_edges
from scipy.stats import iqr
from sklearn.decomposition import PCA
import sys
#AliGater imports
import aligater.AGConfig as agconf
from aligater.AGFileSystem import getGatedVector, AliGaterError
from aligater.AGCythonUtils import __vectorlogicleTransform, __vectorInverselogicleTransform, __vectorBilogTransform, __vectorInverseBilogTransform
sentinel = object()
def plotHeatmap(fcsDF, x, y, vI=sentinel, bins=300, scale='linear', xscale='linear', yscale='linear', thresh=1000, aspect='auto', **kwargs):
"""
Core plotting function of AliGater. Mainly intended to be called internally, but may be called directly.
Only plots. No gating functionalities.
**Parameters**
fcsDF : pandas.DataFrame
Flow data loaded in a pandas DataFrame.
x, y : str
Marker labels.
vI : list-like, optional
list-like index of events in the fcsDF that correspond to the parent population.
Defaults to plotting all events in fcsDF.
bins : int, optional, default: 300
Resolution of the plotted heatmap.
scale : str, optional, default: 'linear'
Which scale to be used on both axes.
xscale : str, optional, default: 'linear'
Which scale to be used on the x-axis.
yscale : str, optional, default: 'linear'
Which scale to be used on the y-axis.
    thresh : int, optional, default: 1000
        Threshold for the linear-to-log transition of the bilog and logicle scales.
aspect : str
Aspect of plotted heatmap. Passed on to matplotlib.pyplot.imshow()
**Keyword arguments**
cmap : matplotlib.colors.Colormap or str, default: 'jet'
Color map to use.
Either string name of existing matplotlib colormap, or a colormap object.
rcParams : matplotlib.rcParams
Overrides rcParams with the passed rcParams object.
mask_where : float,int, default : 0
        Heatmap bins with counts less than or equal to this value are masked and rendered white.
**Returns**
fig, matplotlib.pyplot.Figure
matplotlib Figure object
    ax, matplotlib.pyplot.Axes
matplotlib axes object
**Examples**
    A minimal usage sketch (illustrative; the marker names are assumptions and must exist in fcsDF):
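
        >>> fig, ax = plotHeatmap(fcsDF, 'FSC 488/10-A', 'SSC 488/10-A', bins=300, scale='bilog', thresh=1000)
        >>> plt.show()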
"""
if vI is sentinel:
vI=fcsDF.index
elif len(vI)<2:
sys.stderr.write("Passed index contains no events\n")
return None, None
if not isinstance(bins,str) and len(vI)<bins:
bins=len(vI)
if scale.lower()=='logicle':
xscale='logicle'
yscale='logicle'
if scale.lower()=='bilog':
xscale='bilog'
yscale='bilog'
#Default x and y lims
bYlim=False
bXlim=False
if 'xlim' in kwargs:
if not isinstance(kwargs['xlim'],list):
raise TypeError("if xlim is passed, it must be a list of float/int")
elif not all(isinstance(i,(float,int)) for i in kwargs['xlim']):
raise TypeError("Non float/int element encountered in xlim")
else:
xscale_limits=kwargs['xlim']
if xscale.lower()=='logicle':
xscale_limits=logicleTransform(xscale_limits,thresh)
bXlim=True
if 'ylim' in kwargs:
if not isinstance(kwargs['ylim'],list):
raise TypeError("if ylim is passed, it must be a list of float/int")
elif not all(isinstance(i,(float,int)) for i in kwargs['ylim']):
raise TypeError("Non float/int element encountered in ylim")
else:
yscale_limits=kwargs['ylim']
if yscale.lower()=='logicle':
yscale_limits=logicleTransform(yscale_limits,thresh)
bYlim=True
if 'cmap' in kwargs:
cmap = kwargs['cmap']
if not isinstance(cmap, str):
collect_default=False
else:
collect_default=True
else:
collect_default=True
cmap='jet'
if 'rcParams' in kwargs:
if not isinstance(kwargs['rcParams'],dict):
raise TypeError("if rcParams is passed, it must be a dict")
else:
rcParams=kwargs['rcParams']
custom_rcParams=True
else:
custom_rcParams=False
if 'mask_where' in kwargs:
mask_value = kwargs['mask_where']
assert isinstance(mask_value,(float,int))
else:
mask_value=0
vX=getGatedVector(fcsDF, x, vI, return_type="nparray")
vY=getGatedVector(fcsDF, y, vI, return_type="nparray")
plt.clf()
if custom_rcParams:
plt.rcParams=rcParams
else:
plt.rcParams['figure.figsize']=10,10
plt.rcParams['image.cmap']=cmap
#extra
plt.rcParams['font.size'] = 22
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
#plt.rcParams['label.size': 22]
heatmap, xedges, yedges = getHeatmap(vX, vY, bins, scale, xscale, yscale, thresh)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
heatmap=np.ma.masked_where(heatmap <= mask_value, heatmap)
plt.clf()
fig, ax = plt.subplots()
plt.imshow(heatmap.T, extent=extent, origin='lower',aspect=aspect, cmap=cmap)
#CLOSES ALL OPEN FIGURES ON CALL - PERHAPS BAD ?
plt.close('all')
fig = plt.figure()
ax = plt.gca()
#matplotlib 3.2.x changed behaviour of interpolation
#see https://github.com/matplotlib/matplotlib/issues/17722
#and https://matplotlib.org/3.2.1/api/api_changes.html#default-image-interpolation
plt.imshow(heatmap.T, extent=extent, origin='lower',aspect=aspect, interpolation='none')
plt.xlabel(x)
plt.ylabel(y)
if collect_default:
cmap=plt.get_cmap()
cmap.set_bad(color='white') #Zeroes should be white, not blue
if xscale.lower()=='logicle':
ax=plt.gca()
ax.xaxis.set_major_locator(logicleLocator(linCutOff=thresh))
ax.xaxis.set_major_formatter(logicleFormatter(linCutOff=thresh))
if yscale.lower()=='logicle':
ax=plt.gca()
ax.yaxis.set_major_locator(logicleLocator(linCutOff=thresh))
ax.yaxis.set_major_formatter(logicleFormatter(linCutOff=thresh))
if xscale.lower()=='bilog':
ax=plt.gca()
ax.xaxis.set_major_locator(BiLogLocator(linCutOff=thresh))
ax.xaxis.set_major_formatter(BiLogFormatter(linCutOff=thresh))
if yscale.lower()=='bilog':
ax=plt.gca()
ax.yaxis.set_major_locator(BiLogLocator(linCutOff=thresh))
ax.yaxis.set_major_formatter(BiLogFormatter(linCutOff=thresh))
if bXlim:
        ax.set_xlim(left=xscale_limits[0], right=xscale_limits[1])
if bYlim:
        ax.set_ylim(bottom=yscale_limits[0], top=yscale_limits[1])
return fig,ax
def getHeatmap(vX, vY, bins='auto', scale='linear', xscale='linear', yscale='linear', T=1000, normalize=False, xlim=None, ylim=None, range=None):
if not any(isinstance(i,str) for i in [scale,xscale,yscale]):
raise TypeError("scale, xscale, yscale must be specified as string, such as: 'linear', 'logicle'")
if not all(i.lower() in ['linear', 'logicle', 'bilog'] for i in [scale,xscale,yscale]):
raise TypeError("scale, xscale, yscale can only be either of: 'linear', 'logicle'")
if not isinstance(bins,(int,str)):
raise TypeError("bins can only be either of int or str")
if range is not None:
if isinstance(range,list):
if len(range)==2:
if not all(isinstance(i,(list)) for i in range):
                    raise AliGaterError("in getHeatmap, invalid dtype encountered in range, expected two list-likes")
else:
if not all(isinstance(i,(float,int)) for i in range[0]) or not all(isinstance(i,(float,int)) for i in range[1]):
                        raise AliGaterError("in getHeatmap, invalid dtype encountered in range")
else:
defaultRange=range
xRange=range[0]
yRange=range[1]
else:
                raise AliGaterError("in getHeatmap, range must be list, found "+str(type(range)))
else:
            raise AliGaterError("in getHeatmap, custom range passed but is not list, found type: "+str(type(range)))
else:
defaultRange=None
xRange=None
yRange=None
if not len(vX) == len(vY):
raise AliGaterError("in getHeatmap: ","Coordinate vectors are of unequal length")
if len(vX)==0:
raise AliGaterError("in getHeatmap: ","Coordinate vectors are empty")
if not isinstance(vX,np.ndarray):
try:
vX=np.asarray(vX)
except:
raise AliGaterError("in getHeatmap: ", "Couldn't coerce x-value vectors into numpy array format")
if not isinstance(vY,np.ndarray):
try:
vY=np.asarray(vY)
except:
raise AliGaterError("in getHeatmap: ", "Couldn't coerce x-value vectors into numpy array format")
index_mask=[]
for i in np.arange(len(vX)-1,-1,-1):
if xlim is not None:
if vX[i] < xlim[0] or vX[i] > xlim[1]:
index_mask.append(i)
continue
        if ylim is not None:
            if vY[i] < ylim[0] or vY[i] > ylim[1]:
                index_mask.append(i)
if len(index_mask) > 0:
vX = np.delete(vX, index_mask)
vY = np.delete(vY, index_mask)
assert len(vX) == len(vY)
if isinstance(bins, str):
xbin_edges=np.histogram_bin_edges(vX,bins=bins)
ybin_edges=np.histogram_bin_edges(vY,bins=bins)
else:
xbin_edges=bins
ybin_edges=bins
if scale.lower()=='linear' and xscale.lower()=='linear' and yscale.lower() == 'linear':
return np.histogram2d(vX, vY, [xbin_edges, ybin_edges], normed=normalize, range=defaultRange)
#if not linear probably just transform and calc edges after
#attempt at fix, still some redundancy...
t_xbin_edges = t_ybin_edges = None
if scale.lower()!='linear' or (xscale.lower()!='linear' and yscale.lower()!='linear'):
t_vX = transformWrapper(vX, scale=xscale, T=T)
t_xbin_edges=np.histogram_bin_edges(t_vX,bins=bins)
xbin_edges = inverseTransformWrapper(t_xbin_edges, scale=xscale, T=T)
t_vY = transformWrapper(vY, scale=yscale, T=T)
t_ybin_edges=np.histogram_bin_edges(t_vY,bins=bins)
ybin_edges = inverseTransformWrapper(t_ybin_edges, scale=yscale, T=T)
return np.histogram2d(vX,vY, [xbin_edges, ybin_edges], normed=normalize, range=defaultRange)
if xscale.lower()!='linear':
t_vX = transformWrapper(vX, scale=xscale, T=T)
t_xbin_edges=np.histogram_bin_edges(t_vX,bins=bins)
xbin_edges = inverseTransformWrapper(t_xbin_edges, scale=xscale, T=T)
ybin_edges = np.histogram_bin_edges(vY, bins=bins)
if yscale.lower()!='linear':
t_vY = transformWrapper(vY, scale=yscale, T=T)
t_ybin_edges=np.histogram_bin_edges(t_vY,bins=bins)
ybin_edges = inverseTransformWrapper(t_ybin_edges, scale=yscale, T=T)
xbin_edges = np.histogram_bin_edges(vX, bins=bins)
#print(ybin_edges)
#print("\n\n")
#print(xbin_edges)
#print("\n\n")
return np.histogram2d(vX,vY, [xbin_edges, ybin_edges], normed=normalize, range=defaultRange)
#-------------------------DEPRECATED below---------------------------
if scale=='logicle' or (xscale == 'logicle' and yscale == 'logicle'):
xBinEdges=logicleBin(vX,bins,T, xRange)
yBinEdges=logicleBin(vY,bins,T, yRange)
return np.histogram2d(vX, vY, [xBinEdges,yBinEdges], normed=normalize)
if xscale=='logicle':
xBinEdges=logicleBin(vX,bins,T, xRange)
return np.histogram2d(vX, vY, [xBinEdges,bins], normed=normalize)
if yscale=='logicle':
yBinEdges=logicleBin(vY,bins,T, yRange)
return np.histogram2d(vX, vY, [bins,yBinEdges], normed=normalize)
if scale=='bilog' or (xscale == 'bilog' and yscale == 'bilog'):
xBinEdges=bilogBin(vX,bins,T, xRange)
yBinEdges=bilogBin(vY,bins,T, yRange)
#print("xBinEdges: ")
#print(xBinEdges)
#print("\n\n")
#print("yBinEdges: ")
#print(yBinEdges)
return np.histogram2d(vX, vY, [xBinEdges,yBinEdges], normed=normalize)
if xscale=='bilog':
xBinEdges=bilogBin(vX,bins,T, xRange)
return np.histogram2d(vX, vY, [xBinEdges,bins], normed=normalize)
if yscale=='bilog':
yBinEdges=bilogBin(vY,bins,T, yRange)
return np.histogram2d(vX, vY, [bins,yBinEdges], normed=normalize)
def plot_flattened_heatmap(heatmap_array, nOfBins, mask=True):
reshaped_array = heatmap_array.reshape(nOfBins, nOfBins)
fig, ax = plt.subplots()
if mask:
heatmap=np.ma.masked_where(reshaped_array == 0, reshaped_array)
cmap=plt.get_cmap()
cmap.set_bad(color='white')
else:
heatmap=reshaped_array
plt.imshow(heatmap.T[::-1])
plt.show()
plt.clf()
return None
def transformWrapper(vX, T, scale):
"""
General function for converting values or arrays of values to AliGater scales; bilog and logicle.
See inverseTransformWrapper to convert the other way around.
**Parameters**
vX, list-like or float/int
value or values to convert.
T, int/float
Threshold for linear-log transition for bilog and logicle scales
scale, str
Scale to convert to; 'bilog' or 'logicle'
**Returns**
If a scalar is passed, scalar
        If a list-like is passed, a numpy array
**Examples**
        A minimal sketch (illustrative input values):
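
        >>> tX = transformWrapper([-10000, 0, 1000, 100000], T=1000, scale='bilog')   # numpy array of transformed values
        >>> tval = transformWrapper(500.0, T=1000, scale='logicle')                   # single transformed value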
"""
result=None
single_val=False
#ToDo raise if more than 1 dim?
if not isinstance(vX, (list, np.ndarray, tuple)):
if isinstance(vX, (float, int)):
vInput=np.asarray(vX).reshape(1,)
single_val=True
else:
raise AliGaterError("in transformWrapper","invalid dType of passed vX, must be either a single float/int value or list/np.ndarray/tuple of float/int values")
else:
vInput=vX
if not isinstance(vX,np.ndarray):
try:
vX=np.asarray(vX)
except:
raise AliGaterError("in transformWrapper: ", "Couldn't coerce input vector to numpy array format")
if scale.lower() == 'logicle':
result = logicleTransform(vInput, T)
elif scale.lower() == 'bilog':
result=bilogTransform(vInput, T)
elif scale.lower() == 'linear':
result=vX
if result is None:
raise
if single_val:
result=result[0]
return result
def inverseTransformWrapper(vX, T, scale):
"""
General function for converting values or arrays of values from AliGater scales; bilog and logicle back to linear values.
See transformWrapper to convert into AliGater scales.
**Parameters**
vX, list-like or float/int
value or values to convert.
T, int/float
Threshold for linear-log transition for bilog and logicle scales
scale, str
Scale to convert from; 'bilog' or 'logicle'
**Returns**
If a scalar is passed, scalar
        If a list-like is passed, a numpy array
**Examples**
        A minimal sketch (illustrative input values):
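
        >>> lin = inverseTransformWrapper([0.5, 1.0, 2.0], T=1000, scale='bilog')   # back to linear-scale values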
"""
result=None
single_val=False
if not isinstance(vX, (list, np.ndarray, tuple)):
if isinstance(vX, (float, int)):
vInput=np.asarray(vX).reshape(1,)
single_val=True
else:
raise AliGaterError("in inverseTransformWrapper","invalid dType of passed vX, must be either a single float/int value or list/np.ndarray/tuple of float/int values")
else:
vInput=vX
if not isinstance(vX,np.ndarray):
try:
vX=np.asarray(vX)
except:
raise AliGaterError("in inverseTransformWrapper: ", "Couldn't coerce input vector to numpy array format")
if scale.lower() == 'logicle':
result = inverselogicleTransform(vInput, T)
elif scale.lower() == 'bilog':
result=inverseBilogTransform(vInput, T)
elif scale.lower() == 'linear':
result=vX
if result is None:
raise
if single_val:
result=result[0]
return result
def bilogBin(vX, bins, T, customRange=None):
if customRange is not None:
defaultRange=customRange
else:
defaultRange=[min(vX),max(vX)]
transformedRange=bilogTransform(defaultRange,T)
transformedBinEdges=np.linspace(transformedRange[0],transformedRange[1],bins+1)
return inverseBilogTransform(transformedBinEdges, T)
def bilogTransform(a, T):
vA = np.asarray(a, dtype = np.float64, order='C')
tA=__vectorBilogTransform(vA, np.float64(T))
return tA
# old python implementation, moved to AGCythonUtils
# tA = np.empty_like(a).astype(float)
# a_idx=0
# while a_idx < len(a):
# if a[a_idx] >= T:
# tA[a_idx] = np.log(10 * a[a_idx] / T)/np.log(10)
# elif a[a_idx] < T and a[a_idx] > -T:
# tA[a_idx] = (a[a_idx]/T + np.log(10) - 1) / np.log(10)
# else:
# tA[a_idx] = -np.log(10 * abs(a[a_idx]) / T) / np.log(10)+1.13141103619349642 #This shift ensures that the transformed coordinates are continous, important for bins and plotting
# a_idx+=1
# return tA
def inverseBilogTransform(a, T):
vA = np.asarray(a, dtype = np.float64, order='C')
invA = __vectorInverseBilogTransform(vA, np.float64(T))
return invA
# old python implementation, moved to AGCythonUtils
# invA=np.empty_like(a).astype(float)
# a_idx=0
# while a_idx < len(a):
# if a[a_idx] >= 1.0: #transformed linCutOff, always 1.0 at T; np.log(10 * linCutOff / linCutOff)/np.log(10) -> np.log(10)/np.log(10) = 1
# invA[a_idx] = T*np.exp(np.log(10)*a[a_idx])/10
# elif a[a_idx] <= 0.13141103619349642: #This is (np.log(10)-2)/np.log(10) I.e. the linear scale value at X=-T
# tmpX=a[a_idx]-1.13141103619349642 #This shift ensures that the transformed coordinates are continous, important for bins and plotting
# invA[a_idx] = -T*np.exp(np.log(10)*-tmpX)/10
# else:
# invA[a_idx] = T * (np.log(10)*a[a_idx] - np.log(10) + 1)
# a_idx+=1
# return invA
def logicleBin(vX, bins, T, customRange=None):
if customRange is not None:
defaultRange=customRange
else:
defaultRange=[min(vX),max(vX)]
transformedRange=logicleTransform(defaultRange,T)
transformedBinEdges=np.linspace(transformedRange[0],transformedRange[1],bins+1)
return inverselogicleTransform(transformedBinEdges, T)
def logicleTransform(a, linCutOff):
vA = np.asarray(a, dtype = np.float64, order='C')
tA=__vectorlogicleTransform(vA, np.float64(linCutOff))
return tA
# old python implementation, moved to AGCythonUtils
# tA = np.empty_like(a).astype(float)
# a_idx=0
# while a_idx < len(a):
# if a[a_idx] >= linCutOff:
# tA[a_idx] = np.log(10 * a[a_idx] / linCutOff)/np.log(10)
# else:
# tA[a_idx] = (a[a_idx]/linCutOff + np.log(10.0) - 1)/np.log(10)
# a_idx+=1
#return tA
def inverselogicleTransform(a, linCutOff):
vA = np.asarray(a, dtype = np.float64, order='C')
invA = __vectorInverselogicleTransform(vA, np.float64(linCutOff))
return invA
# old python implementation, moved to AGCythonUtils
# invA=np.empty_like(a).astype(float)
# a_idx=0
# while a_idx < len(a):
# if a[a_idx] >= 1.0: #transformed linCutOff, always 1.0 at T; np.log(10 * linCutOff / linCutOff)/np.log(10) -> np.log(10)/np.log(10) = 1
# invA[a_idx] = linCutOff*np.exp(np.log(10)*a[a_idx])/10
# #invA[a_idx]= (np.exp(a[a_idx])+10)*linCutOff/10
# else:
# invA[a_idx] = linCutOff*(np.log(10.0)*a[a_idx] - np.log(10.0) + 1)
# a_idx+=1
# return invA
def addAxLine(fig, ax, pos, orientation, size=2, scale='linear', xscale='linear', yscale='linear', T=1000):
if not all(i in ['linear', 'logicle', 'bilog'] for i in [scale, xscale, yscale]):
raise TypeError("scale, xscale, yscale can only be either of: 'linear', 'logicle', 'bilog'")
if orientation.lower()=='vertical':
if scale.lower() != 'linear' or xscale.lower() != 'linear':
lims=ax.get_xlim()
vmin = lims[0]
vmax = lims[1]
if scale.lower() == 'logicle' or xscale.lower() == 'logicle':
pos = convertTologiclePlotCoordinate(pos,vmin,vmax,T)
if scale.lower() == 'bilog' or xscale.lower() == 'bilog':
pos = convertToBiLogPlotCoordinate(pos,vmin,vmax,T)
ax.axvline(pos, c='r')
else:
if scale.lower() !='linear' or yscale.lower() != 'linear':
lims=ax.get_ylim()
vmin = lims[0]
vmax = lims[1]
            if scale.lower() == 'logicle' or yscale.lower() == 'logicle':
pos = convertTologiclePlotCoordinate(pos,vmin,vmax,T)
if scale.lower() == 'bilog' or yscale.lower() == 'bilog':
pos = convertToBiLogPlotCoordinate(pos,vmin,vmax,T)
ax.axhline(pos, c='r')
return fig
def addLine(fig, ax, lStartCoordinate, lEndCoordinate, size=2, scale='linear', T=1000):
if not scale.lower() in ['linear', 'logicle', 'bilog']:
raise TypeError("scale, xscale, yscale can only be either of: 'linear', 'logicle', 'bilog'")
if scale.lower()=='logicle':
view=ax.xaxis.get_view_interval()
xCoordinates=convertTologiclePlotCoordinates([lStartCoordinate[0],lEndCoordinate[0]], vmin=view[0], vmax=view[1], T=T)
view=ax.yaxis.get_view_interval()
yCoordinates=convertTologiclePlotCoordinates([lStartCoordinate[1],lEndCoordinate[1]], vmin=view[0], vmax=view[1], T=T)
lStartCoordinate=[xCoordinates[0],yCoordinates[0]]
lEndCoordinate=[xCoordinates[1],yCoordinates[1]]
if scale.lower()=='bilog':
view=ax.xaxis.get_view_interval()
xCoordinates=convertToBiLogPlotCoordinates([lStartCoordinate[0],lEndCoordinate[0]], vmin=view[0], vmax=view[1], T=T)
view=ax.yaxis.get_view_interval()
yCoordinates=convertToBiLogPlotCoordinates([lStartCoordinate[1],lEndCoordinate[1]], vmin=view[0], vmax=view[1], T=T)
lStartCoordinate=[xCoordinates[0],yCoordinates[0]]
lEndCoordinate=[xCoordinates[1],yCoordinates[1]]
plt.plot([lStartCoordinate[0], lEndCoordinate[0]], [lStartCoordinate[1], lEndCoordinate[1]], color='r', linestyle='-', linewidth=size,figure=fig)
return fig, ax
def addArrow(fig, ax, lStartCoordinate, lEndCoordinate, size=5000):
arrow=Arrow(lStartCoordinate[0],lStartCoordinate[1],lEndCoordinate[0]-lStartCoordinate[0],lEndCoordinate[1]-lStartCoordinate[1],width=size, transform=ax.transAxes,head_width=size, head_length=size, fc='r', ec='r')
#ax.arrow(lStartCoordinate[0], lStartCoordinate[1], lEndCoordinate[0]-lStartCoordinate[0], lEndCoordinate[1]-lStartCoordinate[1], head_width=size, head_length=size, fc='r', ec='r')
ax.add_patch(arrow)
return fig
def draw_ellipse(position, covariance, sigma=2, ax=None, **kwargs):
if agconf.execMode in ["jupyter","ipython"]:
plot=True
else:
plot=False
if plot:
ax = ax or plt.gca();
# Convert covariance to principal axes
if covariance.shape == (2, 2):
U, s, Vt = np.linalg.svd(covariance)
angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
width,height = np.sqrt(s)*sigma
else:
angle = 0
width, height = np.sqrt(covariance)*sigma
#Note width, height here is the full width and height and not the semiaxis length
# Draw the Ellipse
if plot:
ax.add_patch(Ellipse(position, width, height,
angle, **kwargs));
return width, height, angle
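# Illustrative sketch, not part of the original module: the same principal-axes
# math draw_ellipse uses, applied to a hypothetical 2x2 covariance matrix. The
# SVD gives the rotation angle (from U) and the semi-axis scales (sqrt of the
# singular values), which are expanded by sigma into the full width/height.
def _ellipse_params_demo(sigma=2):
    covariance = np.array([[4.0, 1.5],
                           [1.5, 1.0]])
    U, s, Vt = np.linalg.svd(covariance)
    angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
    width, height = np.sqrt(s) * sigma
    return width, height, angle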
def plot_gmm(fcsDF, xCol, yCol, vI, gmm, sigma, ax):
if agconf.execMode in ["jupyter","ipython"]:
plot=True
else:
plot=False
ax = ax or plt.gca()
vEllipses=[]
for pos, covar, w in zip(gmm.means_, gmm.covariances_, gmm.weights_):
width, height, angle = draw_ellipse(pos, covar, sigma, fill=False,edgecolor='#FF0000', linestyle='dashed');
vEllipses.append([pos,width,height,angle])
if plot:
plt.show();
return vEllipses
def plot_densityFunc(fcsDF, xCol,vI=sentinel, sigma=3, bins=300, scale='linear', T=1000, *args, **kwargs):
"""
    Plots a gaussian-smoothed density function (histogram) of a single marker.
    The data is binned on the requested scale and smoothed with a 1-D gaussian kernel.
    **Parameters**
    fcsDF, pandas.DataFrame
        Flow cytometry data loaded in a pandas DataFrame.
    xCol, str
        Name of the marker (column) to plot.
    vI, list-like, optional
        Index of events to include, defaults to all events in the dataframe.
    sigma, float/int, optional, default: 3
        Standard deviation of the gaussian smoothing kernel, in bins.
    bins, int or str, optional, default: 300
        Number of histogram bins, or 'auto' to estimate a suitable bin count.
    scale, str, optional, default: 'linear'
        Plotting scale; one of 'linear', 'logicle', 'bilog'.
    T, int/float, optional, default: 1000
        Threshold for the linear-log transition of the bilog and logicle scales.
    **Returns**
    matplotlib figure, matplotlib axes
    **Examples**
    None currently.
"""
if xCol not in fcsDF.columns:
raise TypeError("Specified gate not in dataframe, check spelling or control your dataframe.columns labels")
if vI is sentinel:
vI=fcsDF.index
elif len(vI)==0:
sys.stderr.write("Passed index contains no events\n")
return None
if not all(i in ['linear', 'logicle', 'bilog'] for i in [scale]):
raise TypeError("scale, xscale, yscale can only be either of: 'linear', 'logicle', 'bilog'")
if not isinstance(sigma,(float,int)):
raise AliGaterError("Sigma must be float or int, found: "+str(type(sigma)),"in plot_densityFunc")
if 'sigma' in kwargs:
if not isinstance(kwargs['sigma'],(float,int)):
raise AliGaterError("Sigma must be float or int, found: "+str(type(sigma)),"in plot_densityFunc")
else:
sigma=kwargs['sigma']
data=getGatedVector(fcsDF, xCol, vI, return_type="nparray")
if isinstance(bins,int):
if len(vI)<bins:
sys.stderr.write("Fewer events than bins, readjusting number of bins\n")
bins=len(vI)
elif bins=='auto':
if scale.lower()!='linear':
t_data = transformWrapper(data, T=T, scale=scale)
else:
t_data=data
bins=__autoBinCount(t_data)
else:
raise AliGaterError("bins must be integer or string 'auto'","in plot_densityFunc")
if scale == 'logicle':
BinEdges=logicleBin(data,bins,T)
histo = np.histogram(data, BinEdges)
elif scale == 'bilog':
BinEdges=bilogBin(data,bins,T)
histo = np.histogram(data, BinEdges)
else:
histo=np.histogram(data, bins)
vHisto=np.linspace(min(histo[1]),max(histo[1]),bins)
smoothedHisto=gaussian_filter1d(histo[0].astype(float),sigma)
plt.clf()
fig,ax = plt.subplots()
ax.plot(vHisto,smoothedHisto, label="pdf for "+str(xCol)+"\nsigma: "+str(sigma))
plt.legend(loc='upper right', shadow=True, fontsize='medium')
if scale.lower()!='linear':
ax=plt.gca()
ax.set_xlim(left=min(data),right=max(data))
if scale.lower()=='logicle':
ax.xaxis.set_major_locator(logicleLocator(linCutOff=T))
ax.xaxis.set_major_formatter(logicleFormatter(linCutOff=T))
if scale.lower()=='bilog':
ax.xaxis.set_major_locator(BiLogLocator(linCutOff=T))
ax.xaxis.set_major_formatter(BiLogFormatter(linCutOff=T))
#plt.show()
return fig,ax
def __autoBinCount(data):
    #Internal function that mimics numpy's numpy.histogram_bin_edges functionality to guess an appropriate number of bins
#https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram_bin_edges.html
data_IQR = iqr(data)
n=len(data)
fd_h = 2*(data_IQR/(np.power(n,(1/3)))) #Freedman Diaconis Estimator
fd_bins = np.round(np.ceil((max(data)-min(data)) / fd_h)) #np.round(np.ceil(range / h))
s_bins = np.log2(n)+1 #Sturges estimator
bins=int(max([fd_bins,s_bins]))
return bins
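# Illustrative sketch, not part of the original module: __autoBinCount keeps the
# larger of the Freedman-Diaconis and Sturges estimates, so large samples get a
# bin count driven by their spread rather than by log2(n)+1 alone. The sample
# below is hypothetical.
def _auto_bin_count_demo():
    sample = np.random.normal(size=10000)
    return __autoBinCount(sample)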
def imagePCA_cluster(imlist, samplelist, nOfComponents=2):
immatrix = np.array([im.flatten() for im in imlist],'f')
#Check for nan elements in matrix
if np.isnan(immatrix).any():
array_has_nan = np.array([np.isnan(arr).any() for arr in immatrix])
removed_images = samplelist[array_has_nan]
imlist = imlist[~array_has_nan]
samplelist=samplelist[~array_has_nan]
n_of_nan=array_has_nan.sum()
        reportStr=str(n_of_nan)+" samples had invalid images and were removed:\n"+"\n".join(removed_images)+"\n"
sys.stderr.write(reportStr)
immatrix = np.array([im.flatten() for im in imlist],'f')
if immatrix.shape[0] == 0:
reportStr="No data in passed image matrix\n"
sys.stderr.write(reportStr)
return None
if immatrix.shape[0] < nOfComponents:
reportStr="WARNING: fewer samples than requested components for PC analysis, adjusting\n"
sys.stderr.write(reportStr)
nOfComponents=immatrix.shape[0]
pca_obj = PCA(n_components=nOfComponents)
pca_obj.fit(immatrix)
projection_d = pca_obj.transform(immatrix)
projection_d_df = | pd.DataFrame(projection_d) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn.model_selection import KFold
import warnings
import time
import sys
import datetime
from sklearn.metrics import mean_squared_error
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.max_columns', 500)
def reduce_mem_usage(df, verbose=True):
    '''[summary]
    Reduces memory usage by downcasting each numeric column to the narrowest
    integer/float dtype that can still hold its value range.
    Arguments:
        df {pandas.DataFrame} -- input dataframe whose numeric columns are downcast
    Keyword Arguments:
        verbose {bool} -- print the achieved memory reduction (default: {True})
    Returns:
        pandas.DataFrame -- the dataframe with downcast dtypes
    '''
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
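# Illustrative sketch (hypothetical frame, not part of the original notebook):
# each numeric column is downcast to the narrowest dtype that holds its range,
# e.g. small integers become int8 and small floats become float16.
def _reduce_mem_usage_demo():
    demo = pd.DataFrame({'small_int': [1, 2, 3],
                         'wide_float': [0.5, 1.5, 2.5]})
    demo = reduce_mem_usage(demo)
    return demo.dtypes  # small_int -> int8, wide_float -> float16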
# parse_dates: have read_csv parse these columns into datetime64 while loading
new_transactions = pd.read_csv(r'D:\workspace\MachineLearning\Kaggle\Elo_Merchant_Category_Recommendation\dataset\new_merchant_transactions.csv',parse_dates=['purchase_date'])
historical_transactions = pd.read_csv(r'D:\workspace\MachineLearning\Kaggle\Elo_Merchant_Category_Recommendation\dataset\historical_transactions.csv',parse_dates=['purchase_date'])
def binarize(df):
    '''[summary]
    Binarization: maps the 'Y'/'N' flag columns to 1/0.
    Arguments:
        df {pandas.DataFrame} -- transactions dataframe with 'authorized_flag' and 'category_1' columns
    Returns:
        pandas.DataFrame -- the dataframe with those columns encoded as 1/0
    '''
for col in ['authorized_flag', 'category_1']:
df[col] = df[col].map({'Y':1, 'N':0})
return df
historical_transactions = binarize(historical_transactions)
new_transactions = binarize(new_transactions)
# formatting the dates
def read_data(input_file):
df = pd.read_csv(input_file)
df['first_active_month'] = pd.to_datetime(df['first_active_month'])
df['elapsed_time'] = (datetime.date(2018, 2, 1) - df['first_active_month'].dt.date).dt.days
return df
# load the main files, and extracting the target
train = read_data(r'D:\workspace\MachineLearning\Kaggle\Elo_Merchant_Category_Recommendation\dataset\train.csv')
test = read_data(r'D:\workspace\MachineLearning\Kaggle\Elo_Merchant_Category_Recommendation\dataset\test.csv')
target = train['target']
del train['target']
# Feature engineering
historical_transactions['month_diff'] = ((datetime.datetime.today() - historical_transactions['purchase_date']).dt.days)//30
historical_transactions['month_diff'] += historical_transactions['month_lag']
new_transactions['month_diff'] = ((datetime.datetime.today() - new_transactions['purchase_date']).dt.days)//30
new_transactions['month_diff'] += new_transactions['month_lag']
# historical_transactions[:5]
historical_transactions = pd.get_dummies(historical_transactions, columns=['category_2', 'category_3'])
new_transactions = pd.get_dummies(new_transactions, columns=['category_2', 'category_3'])
historical_transactions = reduce_mem_usage(historical_transactions)
new_transactions = reduce_mem_usage(new_transactions)
agg_fun = {'authorized_flag': ['mean']}
auth_mean = historical_transactions.groupby(['card_id']).agg(agg_fun)
auth_mean.columns = ['_'.join(col).strip() for col in auth_mean.columns.values]
auth_mean.reset_index(inplace=True)
authorized_transactions = historical_transactions[historical_transactions['authorized_flag'] == 1]
historical_transactions = historical_transactions[historical_transactions['authorized_flag'] == 0]
# define a few dates features
historical_transactions['purchase_month'] = historical_transactions['purchase_date'].dt.month
authorized_transactions['purchase_month'] = authorized_transactions['purchase_date'].dt.month
new_transactions['purchase_month'] = new_transactions['purchase_date'].dt.month
# Then I define two functions that aggregate the info contained in these two tables. The first function aggregates the function by grouping on `card_id`
def aggregate_transactions(history):
history.loc[:, 'purchase_date'] = pd.DatetimeIndex(history['purchase_date']).\
astype(np.int64) * 1e-9
agg_func = {
'category_1': ['sum', 'mean'],
'category_2_1.0': ['mean'],
'category_2_2.0': ['mean'],
'category_2_3.0': ['mean'],
'category_2_4.0': ['mean'],
'category_2_5.0': ['mean'],
'category_3_A': ['mean'],
'category_3_B': ['mean'],
'category_3_C': ['mean'],
'merchant_id': ['nunique'],
'merchant_category_id': ['nunique'],
'state_id': ['nunique'],
'city_id': ['nunique'],
'subsector_id': ['nunique'],
'purchase_amount': ['sum', 'mean', 'max', 'min', 'std'],
'installments': ['sum', 'mean', 'max', 'min', 'std'],
'purchase_month': ['mean', 'max', 'min', 'std'],
'purchase_date': [np.ptp, 'min', 'max'],
'month_lag': ['mean', 'max', 'min', 'std'],
'month_diff': ['mean']
}
agg_history = history.groupby(['card_id']).agg(agg_func)
agg_history.columns = ['_'.join(col).strip() for col in agg_history.columns.values]
agg_history.reset_index(inplace=True)
df = (history.groupby('card_id')
.size()
.reset_index(name='transactions_count'))
agg_history = pd.merge(df, agg_history, on='card_id', how='left')
return agg_history
history = aggregate_transactions(historical_transactions)
history.columns = ['hist_' + c if c != 'card_id' else c for c in history.columns]
history[:5]
authorized = aggregate_transactions(authorized_transactions)
authorized.columns = ['auth_' + c if c != 'card_id' else c for c in authorized.columns]
authorized[:5]
new = aggregate_transactions(new_transactions)
new.columns = ['new_' + c if c != 'card_id' else c for c in new.columns]
new[:5]
# The second function first aggregates on the two variables `card_id` and `month_lag`. Then a second grouping is performed to aggregate over time:
def aggregate_per_month(history):
grouped = history.groupby(['card_id', 'month_lag'])
agg_func = {
'purchase_amount': ['count', 'sum', 'mean', 'min', 'max', 'std'],
'installments': ['count', 'sum', 'mean', 'min', 'max', 'std'],
}
intermediate_group = grouped.agg(agg_func)
intermediate_group.columns = ['_'.join(col).strip() for col in intermediate_group.columns.values]
intermediate_group.reset_index(inplace=True)
final_group = intermediate_group.groupby('card_id').agg(['mean', 'std'])
final_group.columns = ['_'.join(col).strip() for col in final_group.columns.values]
final_group.reset_index(inplace=True)
return final_group
#___________________________________________________________
final_group = aggregate_per_month(authorized_transactions)
final_group[:10]
def successive_aggregates(df, field1, field2):
t = df.groupby(['card_id', field1])[field2].mean()
u = pd.DataFrame(t).reset_index().groupby('card_id')[field2].agg(['mean', 'min', 'max', 'std'])
u.columns = [field1 + '_' + field2 + '_' + col for col in u.columns.values]
u.reset_index(inplace=True)
return u
additional_fields = successive_aggregates(new_transactions, 'category_1', 'purchase_amount')
additional_fields = additional_fields.merge(successive_aggregates(new_transactions, 'installments', 'purchase_amount'),
on = 'card_id', how='left')
additional_fields = additional_fields.merge(successive_aggregates(new_transactions, 'city_id', 'purchase_amount'),
on = 'card_id', how='left')
additional_fields = additional_fields.merge(successive_aggregates(new_transactions, 'category_1', 'installments'),
on = 'card_id', how='left')
# 3. Training the model
# We now train the model with the features we previously defined. A first step consists in merging all the dataframes:
train = pd.merge(train, history, on='card_id', how='left')
test = pd.merge(test, history, on='card_id', how='left')
train = | pd.merge(train, authorized, on='card_id', how='left') | pandas.merge |
import re
from datetime import datetime
import nose
import pytz
import platform
from time import sleep
import os
import logging
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas import NaT
from pandas.compat import u, range
from pandas.core.frame import DataFrame
import pandas.io.gbq as gbq
import pandas.util.testing as tm
from pandas.compat.numpy import np_datetime64_compat
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
_IMPORTS = False
_GOOGLE_API_CLIENT_INSTALLED = False
_GOOGLE_API_CLIENT_VALID_VERSION = False
_HTTPLIB2_INSTALLED = False
_SETUPTOOLS_INSTALLED = False
def _skip_if_no_project_id():
if not _get_project_id():
raise nose.SkipTest(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json file path")
def _skip_if_no_private_key_contents():
if not _get_private_key_contents():
raise nose.SkipTest("Cannot run integration tests without a "
"private key json contents")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
else:
return PROJECT_ID
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
else:
return PRIVATE_KEY_JSON_PATH
def _get_private_key_contents():
if _in_travis_environment():
with open(os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])) as f:
return f.read()
else:
return PRIVATE_KEY_JSON_CONTENTS
def _test_imports():
global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \
_HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED
try:
import pkg_resources
_SETUPTOOLS_INSTALLED = True
except ImportError:
_SETUPTOOLS_INSTALLED = False
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
if _SETUPTOOLS_INSTALLED:
try:
try:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
except:
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
from oauth2client.client import OAuth2WebServerFlow # noqa
from oauth2client.client import AccessTokenRefreshError # noqa
from oauth2client.file import Storage # noqa
from oauth2client.tools import run_flow # noqa
_GOOGLE_API_CLIENT_INSTALLED = True
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution(
'google-api-python-client').version
if (StrictVersion(_GOOGLE_API_CLIENT_VERSION) >=
StrictVersion(google_api_minimum_version)):
_GOOGLE_API_CLIENT_VALID_VERSION = True
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
try:
import httplib2 # noqa
_HTTPLIB2_INSTALLED = True
except ImportError:
_HTTPLIB2_INSTALLED = False
if not _SETUPTOOLS_INSTALLED:
raise ImportError('Could not import pkg_resources (setuptools).')
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('Could not import Google API Client.')
if not _GOOGLE_API_CLIENT_VALID_VERSION:
raise ImportError("pandas requires google-api-python-client >= {0} "
"for Google BigQuery support, "
"current version {1}"
.format(google_api_minimum_version,
_GOOGLE_API_CLIENT_VERSION))
if not _HTTPLIB2_INSTALLED:
raise ImportError(
"pandas requires httplib2 for Google BigQuery support")
# Bug fix for https://github.com/pandas-dev/pandas/issues/12572
# We need to know that a supported version of oauth2client is installed
# Test that either of the following is installed:
# - SignedJwtAssertionCredentials from oauth2client.client
# - ServiceAccountCredentials from oauth2client.service_account
    # SignedJwtAssertionCredentials is available in oauth2client < 2.0.0
    # ServiceAccountCredentials is available in oauth2client >= 2.0.0
oauth2client_v1 = True
oauth2client_v2 = True
try:
from oauth2client.client import SignedJwtAssertionCredentials # noqa
except ImportError:
oauth2client_v1 = False
try:
from oauth2client.service_account import ServiceAccountCredentials # noqa
except ImportError:
oauth2client_v2 = False
if not oauth2client_v1 and not oauth2client_v2:
raise ImportError("Missing oauth2client required for BigQuery "
"service account support")
def _setup_common():
try:
_test_imports()
except (ImportError, NotImplementedError) as import_exception:
raise nose.SkipTest(import_exception)
if _in_travis_environment():
logging.getLogger('oauth2client').setLevel(logging.ERROR)
logging.getLogger('apiclient').setLevel(logging.ERROR)
def _check_if_can_get_correct_default_credentials():
# Checks if "Application Default Credentials" can be fetched
# from the environment the tests are running in.
# See Issue #13577
import httplib2
try:
from googleapiclient.discovery import build
except ImportError:
from apiclient.discovery import build
try:
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
http = httplib2.Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
jobs = bigquery_service.jobs()
job_data = {'configuration': {'query': {'query': 'SELECT 1'}}}
jobs.insert(projectId=_get_project_id(), body=job_data).execute()
return True
except:
return False
def clean_gbq_environment(private_key=None):
dataset = gbq._Dataset(_get_project_id(), private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
def test_generate_bq_schema_deprecated():
# 11121 Deprecation of generate_bq_schema
with tm.assert_produces_warning(FutureWarning):
df = make_mixed_dataframe_v2(10)
gbq.generate_bq_schema(df)
class TestGBQConnectorIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
def test_get_application_default_credentials_does_not_throw_error(self):
if _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Can get default_credentials "
"from the environment!")
credentials = self.sut.get_application_default_credentials()
self.assertIsNone(credentials)
def test_get_application_default_credentials_returns_credentials(self):
if not _check_if_can_get_correct_default_credentials():
raise nose.SkipTest("Cannot get default_credentials "
"from the environment!")
from oauth2client.client import GoogleCredentials
credentials = self.sut.get_application_default_credentials()
self.assertTrue(isinstance(credentials, GoogleCredentials))
class TestGBQConnectorServiceAccountKeyPathIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
_skip_if_no_private_key_path()
self.sut = gbq.GbqConnector(_get_project_id(),
private_key=_get_private_key_path())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class TestGBQConnectorServiceAccountKeyContentsIntegration(tm.TestCase):
def setUp(self):
_setup_common()
_skip_if_no_project_id()
        _skip_if_no_private_key_contents()
        self.sut = gbq.GbqConnector(_get_project_id(),
                                    private_key=_get_private_key_contents())
def test_should_be_able_to_make_a_connector(self):
self.assertTrue(self.sut is not None,
'Could not create a GbqConnector')
def test_should_be_able_to_get_valid_credentials(self):
credentials = self.sut.get_credentials()
self.assertFalse(credentials.invalid, 'Returned credentials invalid')
def test_should_be_able_to_get_a_bigquery_service(self):
bigquery_service = self.sut.get_service()
self.assertTrue(bigquery_service is not None, 'No service returned')
def test_should_be_able_to_get_schema_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(schema is not None)
def test_should_be_able_to_get_results_from_query(self):
schema, pages = self.sut.run_query('SELECT 1')
self.assertTrue(pages is not None)
class GBQUnitTests(tm.TestCase):
def setUp(self):
_setup_common()
def test_import_google_api_python_client(self):
if compat.PY2:
with tm.assertRaises(ImportError):
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
from apiclient.discovery import build # noqa
from apiclient.errors import HttpError # noqa
else:
from googleapiclient.discovery import build # noqa
from googleapiclient.errors import HttpError # noqa
def test_should_return_bigquery_integers_as_python_floats(self):
result = gbq._parse_entry(1, 'INTEGER')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_floats_as_python_floats(self):
result = gbq._parse_entry(1, 'FLOAT')
tm.assert_equal(result, float(1))
def test_should_return_bigquery_timestamps_as_numpy_datetime(self):
result = gbq._parse_entry('0e9', 'TIMESTAMP')
tm.assert_equal(result, np_datetime64_compat('1970-01-01T00:00:00Z'))
def test_should_return_bigquery_booleans_as_python_booleans(self):
result = gbq._parse_entry('false', 'BOOLEAN')
tm.assert_equal(result, False)
def test_should_return_bigquery_strings_as_python_strings(self):
result = gbq._parse_entry('STRING', 'STRING')
| tm.assert_equal(result, 'STRING') | pandas.util.testing.assert_equal |
#!/usr/bin/env python
# Copyright 2019 <NAME>
# See LICENSE for details.
__author__ = "<NAME> <<EMAIL>>"
import pandas as pd
from os import walk
from copy import deepcopy
def fix_month_format(element):
"""
This function converts the abbreviation of a Spanish month into its corresponding month number.
Args:
element (:obj:`str`): name of the month in Spanish. Abbreviation of the first 3 letters.
Returns:
:obj:`str`: The function returns the corresponding number as string.
"""
    meses = {'ene': 1, 'feb': 2, 'mar': 3, 'abr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'ago': 8, 'sept': 9,
             'oct': 10, 'nov': 11, 'dic': 12}
    for word, initial in meses.items():
        element = element.replace(word, str(initial).zfill(2))  # zero-pad so the '%m' field parses for every month
return element
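# Illustrative sketch (hypothetical timestamp string, not part of the original
# module): the Spanish month abbreviation is swapped for its zero-padded number
# so the string can later be parsed with a '%d %m %Y %H:%M' format.
def _fix_month_format_demo():
    return fix_month_format('12 ene 2019 10:30')  # -> '12 01 2019 10:30'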
def fix_date_format(df_, date_format='%d %m %Y %H:%M', date_column_name='hour'): #podría generalizarse
"""
    This function converts the date column into 'datetime' format.
Args:
df_ (:obj:`pandas.DataFrame`): Input dataset.
date_format (:obj:`str`, optional): format in which the dates are embedded.
date_column_name (:obj:`str`, optional): name of the column containing the dates.
Returns:
:obj:`pandas.DataFrame`: The function returns a dataframe with the date column in datetime format.
"""
if len(df_[date_column_name][0]) == 33:
df2 = df_[date_column_name].map(lambda x: fix_month_format(str(x)[5:-11]))
else:
print('ERROR: unrecognised date format.\nNo changes are applied.')
return df_
df2 = pd.to_datetime(df2, format=date_format)
    # swap the columns: drop the raw 'hour' column and store the parsed datetimes
df_ = df_.drop(columns=['hour'])
df_['date_time'] = df2
return df_
def load_data(original_path='./concatenado', date_format='%d %m %Y %H:%M', sort_values=True, date_column_name='hour'):
"""
This function loads information from several files and outputs a single dataset containing all the information.
Args:
original_path (:obj:`str`, optional): path where all the files are located.
date_format (:obj:`str`, optional): format in which the dates are embedded.
sort_values (:obj:`bool`, optional): sort the values by data or preserve the original order.
date_column_name (:obj:`str`, optional): name of the column containing the dates.
Returns:
:obj:`pandas.DataFrame`:
The function returns a pandas.DataFrame containing all the loaded data.
"""
paths = next(walk(original_path))[2]
files = [[] for _ in range(len(paths))]
for i in range(len(paths)):
for (dirpath, dirnames, filenames) in walk(original_path + paths[i]):
files[i].extend(filenames)
break
df = pd.DataFrame()
for file in paths:
try:
df_new = pd.read_csv(original_path + '/' + file, encoding='utf-8',
engine='python', index_col=False)
df = df.append([df_new], ignore_index=True)
except Exception as e:
print(file)
print(e)
continue
df = fix_date_format(df, date_format=date_format, date_column_name=date_column_name)
if sort_values:
df = df.sort_values(by=['date_time']).reset_index(drop=True)
return df
def df_house_sensor(df_, house_number, sensor):
"""
This function extracts the information of a specific sensor in a certain house from a dataframe.
Args:
df_ (:obj:`pandas.DataFrame`): dataframe containing all data.
house_number (:obj:`int`): number of the house which data is getting extracted.
sensor (:obj:`int` or :obj:`str`): name/number of the sensor which data is getting extracted.
Returns:
:obj:`pandas.DataFrame`:
The function returns a dataframe containing the data of a specific sensor in a certain house.
"""
df_grouped = df_.groupby(['house', 'sensor'])
df_dates = pd.DataFrame(df_grouped['date_time'].apply(list).values.tolist(), index=df_grouped.groups)
df_weights = pd.DataFrame(df_grouped['value'].apply(list).values.tolist(), index=df_grouped.groups)
df_dates.columns = df_dates.columns * 2
df_weights.columns = df_weights.columns * 2 + 1
res = pd.concat([df_dates, df_weights], axis=1).sort_index(1)
if type(sensor) == str:
df_one_row = res.loc[str(house_number)].T[sensor].values
else:
df_one_row = res.loc[str(house_number)].iloc[sensor].values
date_time = [df_one_row[i] for i in range(len(df_one_row)) if i % 2 == 0]
values = [df_one_row[i] for i in range(len(df_one_row)) if i % 2 == 1]
return pd.DataFrame({'date_time': date_time, 'values': values})
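# Illustrative usage sketch (hypothetical house number and sensor name): extract
# the readings of one sensor in one house as a two-column frame of timestamps
# and values, e.g.
#   temperature_house3 = df_house_sensor(df, house_number=3, sensor='temperature')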
def adapt_frequency(df_, new_frequency=60, start_date=None, end_date=None, time_column_name='date_time'):
"""
This function changes the refresh frequency of a dataframe.
Args:
df_ (:obj:`pandas.DataFrame`): dataframes of all houses.
new_frequency (:obj:`int`, optional): refresh frequency in minutes of the output.
start_date (:obj:`datetime`, optional): left extreme of the selected time interval.
end_date (:obj:`datetime`, optional): right extreme of the selected time interval.
time_column_name (:obj:`str`, optional): name of the column containing the time information.
Returns:
:obj:`pandas.DataFrame`: The function returns a pandas dataframe with the selected refresh frequency.
"""
if not start_date:
start_date = df_.dropna().date_time[0]
if not end_date:
end_date = df_.dropna().date_time.values[-1]
new_range = pd.date_range(start_date, end_date, freq=str(new_frequency) + 'min')
df_new_range = pd.DataFrame(data=new_range, columns=[time_column_name])
df_new_range['0'] = ''
new_df = pd.concat([df_, df_new_range], sort=True).sort_values(by=time_column_name)
return new_df.interpolate().dropna().drop(['0'], axis=1).set_index(time_column_name) # PODRIA SER MULTIHILO
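# Illustrative usage sketch (hypothetical frame name): re-sample a sensor series
# onto a 30-minute grid; intermediate points are filled by linear interpolation, e.g.
#   resampled = adapt_frequency(sensor_df, new_frequency=30)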
def get_df_house(df_, house_number, frequency=60, time_column_name='date_time'):
"""
This function extracts the dataframe of a specific house from a general dataframe.
Args:
df_ (:obj:`pandas.DataFrame`): dataframes of all houses.
house_number (:obj:`str`): number of the selected house.
frequency (:obj:`int`, optional): refresh frequency in minutes of the output.
time_column_name (:obj:`str`, optional): name of the column containing the time information.
Returns:
:obj:`pandas.DataFrame`: The function returns a pandas dataframe with all the information of the selected house.
"""
if str(house_number) not in df_.casa.unique():
return 'House data not available'
sensors = df_[df_.casa == str(house_number)].sensor.unique()
house_df = | pd.DataFrame() | pandas.DataFrame |
# All of the data-processing helper functions are collected in this file
import SimpleITK as sitk
import os
import json
import glob
import SimpleITK as sitk
import pandas as pd
import matplotlib.pyplot as plt
# DICOM (.dcm) data processing functions
def dicom_metainfo(dicm_path, list_tag):
    '''
    Get the metadata of a DICOM file.
    :param dicm_path: path to the DICOM file
    :param list_tag: list of tag names, e.g. ['0008|0018',]
    :return: a list with the value of each requested tag
    '''
reader = sitk.ImageFileReader()
reader.LoadPrivateTagsOn()
reader.SetFileName(dicm_path)
reader.ReadImageInformation()
return [reader.GetMetaData(t) for t in list_tag]
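# Illustrative usage sketch (hypothetical file path): read a single tag, here the
# SOP Instance UID tag '0008|0018' mentioned in the docstring above, e.g.
#   sop_uid, = dicom_metainfo('study1/image1.dcm', ['0008|0018'])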
def dicom2array(dcm_path):
    '''
    Read a DICOM file and convert it to a grayscale image (np.array).
    https://simpleitk.readthedocs.io/en/master/link_DicomConvert_docs.html
    :param dcm_path: path to the DICOM file
    :return: 2-D grayscale array (uint8)
    '''
image_file_reader = sitk.ImageFileReader()
image_file_reader.SetImageIO('GDCMImageIO')
image_file_reader.SetFileName(dcm_path)
image_file_reader.ReadImageInformation()
image = image_file_reader.Execute()
if image.GetNumberOfComponentsPerPixel() == 1:
image = sitk.RescaleIntensity(image, 0, 255)
if image_file_reader.GetMetaData('0028|0004').strip() == 'MONOCHROME1':
image = sitk.InvertIntensity(image, maximum=255)
image = sitk.Cast(image, sitk.sitkUInt8)
img_x = sitk.GetArrayFromImage(image)[0]
return img_x
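# Illustrative usage sketch (hypothetical file path): convert one slice to a
# grayscale array and preview it, e.g.
#   img = dicom2array('study1/image1.dcm')
#   plt.imshow(img, cmap='gray'); plt.show()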
# JSON annotation file processing functions
def get_info(train_path,json_path):
annotation_info = pd.DataFrame(columns=('studyUid','seriesUid','instanceUid','annotation'))
json_df = | pd.read_json(json_path) | pandas.read_json |
import pandas as pd
import numpy as np
from functools import wraps
import copy
# Pass through pd.DataFrame methods for a (1,1,o,d) shaped triangle:
df_passthru = ['to_clipboard', 'to_csv', 'to_pickle', 'to_excel', 'to_json',
'to_html', 'to_dict', 'unstack', 'pivot', 'drop_duplicates',
'describe', 'melt']
# Aggregate method overridden to the 4D Triangle Shape
agg_funcs = ['sum', 'mean', 'median', 'max', 'min', 'prod', 'var', 'std']
agg_funcs = {item: 'nan'+item for item in agg_funcs}
# def check_triangle_postcondition(f):
# ''' Post-condition check to ensure the integrity of the triangle object
# remains intact. (used for debugging)
# '''
# @wraps(f)
# def wrapper(*args, **kwargs):
# X = f(*args, **kwargs)
# if not hasattr(X, 'triangle'):
# raise ValueError('X is missing triangle attribute')
# if X.triangle.ndim != 4:
# raise ValueError('X.triangle must be a 4-dimensional array')
# if len(X.kdims) != X.triangle.shape[0]:
# raise ValueError('X.index and X.triangle are misaligned')
# if len(X.vdims) != X.triangle.shape[1]:
# raise ValueError('X.columns and X.triangle are misaligned')
# return X
# return wrapper
class TriangleBase:
def __init__(self, data=None, origin=None, development=None,
columns=None, index=None):
# Sanitize Inputs
columns = [columns] if type(columns) is str else columns
origin = [origin] if type(origin) is str else origin
if development is not None and type(development) is str:
development = [development]
key_gr = origin if not development else origin+development
if not index:
index = ['Total']
data_agg = data.groupby(key_gr).sum().reset_index()
data_agg[index[0]] = 'Total'
else:
data_agg = data.groupby(key_gr+index) \
.sum().reset_index()
# Convert origin/development to dates
origin_date = TriangleBase.to_datetime(data_agg, origin)
self.origin_grain = TriangleBase.get_grain(origin_date)
# These only work with valuation periods and not lags
if development:
development_date = TriangleBase.to_datetime(data_agg, development,
period_end=True)
self.development_grain = TriangleBase.get_grain(development_date)
col = 'development'
else:
development_date = origin_date
self.development_grain = self.origin_grain
col = None
# Prep the data for 4D Triangle
data_agg = self.get_axes(data_agg, index, columns,
origin_date, development_date)
data_agg = pd.pivot_table(data_agg, index=index+['origin'],
columns=col, values=columns,
aggfunc='sum')
# Assign object properties
self.kdims = np.array(data_agg.index.droplevel(-1).unique())
self.odims = np.array(data_agg.index.levels[-1].unique())
if development:
self.ddims = np.array(data_agg.columns.levels[-1].unique())
self.ddims = self.ddims*({'Y': 12, 'Q': 3, 'M': 1}
[self.development_grain])
self.vdims = np.array(data_agg.columns.levels[0].unique())
else:
self.ddims = np.array([None])
self.vdims = np.array(data_agg.columns.unique())
self.ddims = self.ddims
self.valuation_date = development_date.max()
self.key_labels = index
self.iloc = _Ilocation(self)
self.loc = _Location(self)
# Create 4D Triangle
triangle = \
np.reshape(np.array(data_agg), (len(self.kdims), len(self.odims),
len(self.vdims), len(self.ddims)))
triangle = np.swapaxes(triangle, 1, 2)
# Set all 0s to NAN for nansafe ufunc arithmetic
triangle[triangle == 0] = np.nan
self.triangle = triangle
# Used to show NANs in lower part of triangle
self.nan_override = False
self.valuation = self._valuation_triangle()
# ---------------------------------------------------------------- #
# ----------------------- Class Properties ----------------------- #
# ---------------------------------------------------------------- #
def _len_check(self, x, y):
if len(x) != len(y):
            raise ValueError(f'Length mismatch: Expected axis has '
                             f'{len(x)} elements, new values have '
                             f'{len(y)} elements')
@property
def shape(self):
return self.triangle.shape
@property
def index(self):
return pd.DataFrame(list(self.kdims), columns=self.key_labels)
@property
def columns(self):
return self.idx_table().columns
@columns.setter
def columns(self, value):
self._len_check(self.columns, value)
self.vdims = [value] if type(value) is str else value
@property
def origin(self):
return pd.DatetimeIndex(self.odims, name='origin')
@origin.setter
def origin(self, value):
self._len_check(self.origin, value)
self.odims = [value] if type(value) is str else value
@property
def development(self):
return pd.Series(list(self.ddims), name='development').to_frame()
@development.setter
def development(self, value):
self._len_check(self.development, value)
self.ddims = [value] if type(value) is str else value
@property
def latest_diagonal(self):
return self.get_latest_diagonal()
@property
# @check_triangle_postcondition
def link_ratio(self):
obj = copy.deepcopy(self)
temp = obj.triangle.copy()
temp[temp == 0] = np.nan
val_array = obj.valuation.values.reshape(obj.shape[-2:],order='f')[:, 1:]
obj.triangle = temp[..., 1:]/temp[..., :-1]
obj.ddims = np.array([f'{obj.ddims[i]}-{obj.ddims[i+1]}'
for i in range(len(obj.ddims)-1)])
# Check whether we want to eliminate the last origin period
if np.max(np.sum(~np.isnan(self.triangle[..., -1, :]), 2)-1) == 0:
obj.triangle = obj.triangle[..., :-1, :]
obj.odims = obj.odims[:-1]
val_array = val_array[:-1, :]
obj.valuation = pd.DatetimeIndex(pd.DataFrame(val_array).unstack().values)
return obj
@property
def age_to_age(self):
return self.link_ratio
# ---------------------------------------------------------------- #
# ---------------------- End User Methods ------------------------ #
# ---------------------------------------------------------------- #
# @check_triangle_postcondition
def get_latest_diagonal(self, compress=True):
''' Method to return the latest diagonal of the triangle. Requires
self.nan_overide == False.
'''
obj = copy.deepcopy(self)
diagonal = obj[obj.valuation == obj.valuation_date].triangle
if compress:
diagonal = np.expand_dims(np.nansum(diagonal, 3), 3)
obj.ddims = ['Latest']
obj.valuation = pd.DatetimeIndex(
[pd.to_datetime(obj.valuation_date)]*len(obj.odims))
obj.triangle = diagonal
return obj
# @check_triangle_postcondition
def incr_to_cum(self, inplace=False):
"""Method to convert an incremental triangle into a cumulative triangle.
Parameters
----------
inplace: bool
Set to True will update the instance data attribute inplace
Returns
-------
Updated instance of triangle accumulated along the origin
"""
if inplace:
np.cumsum(np.nan_to_num(self.triangle), axis=3, out=self.triangle)
self.triangle = self.expand_dims(self.nan_triangle())*self.triangle
self.triangle[self.triangle == 0] = np.nan
return self
else:
new_obj = copy.deepcopy(self)
return new_obj.incr_to_cum(inplace=True)
# @check_triangle_postcondition
def cum_to_incr(self, inplace=False):
"""Method to convert an cumlative triangle into a incremental triangle.
Parameters
----------
inplace: bool
Set to True will update the instance data attribute inplace
Returns
-------
Updated instance of triangle accumulated along the origin
"""
if inplace:
temp = np.nan_to_num(self.triangle)[..., 1:] - \
np.nan_to_num(self.triangle)[..., :-1]
temp = np.concatenate((self.triangle[..., 0:1], temp), axis=3)
temp = temp*self.expand_dims(self.nan_triangle())
temp[temp == 0] = np.nan
self.triangle = temp
return self
else:
new_obj = copy.deepcopy(self)
return new_obj.cum_to_incr(inplace=True)
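    # Illustrative usage sketch (assumes a cumulative Triangle ``tri`` built
    # elsewhere): ``tri.cum_to_incr()`` differences each development column
    # against the previous one, and ``tri.cum_to_incr().incr_to_cum()``
    # reproduces the original cumulative triangle.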
# @check_triangle_postcondition
def grain(self, grain='', incremental=False, inplace=False):
"""Changes the grain of a cumulative triangle.
Parameters
----------
grain : str
The grain to which you want your triangle converted, specified as
'O<x>D<y>' where <x> and <y> can take on values of ``['Y', 'Q', 'M']``
For example, 'OYDY' for Origin Year/Development Year, 'OQDM' for
Origin quarter, etc.
incremental : bool
Not implemented yet
inplace : bool
Whether to mutate the existing Triangle instance or return a new
one.
Returns
-------
Triangle
"""
if inplace:
origin_grain = grain[1:2]
development_grain = grain[-1]
new_tri, o = self._set_ograin(grain=grain, incremental=incremental)
# Set development Grain
dev_grain_dict = {'M': {'Y': 12, 'Q': 3, 'M': 1},
'Q': {'Y': 4, 'Q': 1},
'Y': {'Y': 1}}
if self.shape[3] != 1:
keeps = dev_grain_dict[self.development_grain][development_grain]
keeps = np.where(np.arange(new_tri.shape[3]) % keeps == 0)[0]
keeps = -(keeps + 1)[::-1]
new_tri = new_tri[..., keeps]
self.ddims = self.ddims[keeps]
self.odims = np.unique(o)
self.origin_grain = origin_grain
self.development_grain = development_grain
self.triangle = self._slide(new_tri, direction='l')
self.triangle[self.triangle == 0] = np.nan
self.valuation = self._valuation_triangle()
return self
else:
new_obj = copy.deepcopy(self)
new_obj.grain(grain=grain, incremental=incremental, inplace=True)
return new_obj
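    # Illustrative usage sketch (assumes a quarterly Triangle ``tri`` built
    # elsewhere): ``tri.grain('OYDY')`` returns a copy re-summarized to
    # origin-year / development-year buckets, while
    # ``tri.grain('OYDQ', inplace=True)`` keeps quarterly development lags,
    # groups origins by year, and mutates ``tri`` in place.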
def trend(self, trend=0.0, axis=None):
""" Allows for the trending along origin or development
Parameters
----------
trend : float
The amount of the trend
axis : str ('origin' or 'development')
The axis along which to apply the trend factors. The latest period
of the axis is the trend-to period.
Returns
-------
Triangle updated with multiplicative trend applied.
"""
axis = {'origin': -2, 'development': -1}.get(axis, None)
if axis is None:
if self.shape[-2] == 1 and self.shape[-1] != 1:
axis = -1
elif self.shape[-2] != 1 and self.shape[-1] == 1:
axis = -2
else:
raise ValueError('Cannot infer axis, please supply')
trend = (1+trend)**np.arange(self.shape[axis])[::-1]
trend = np.expand_dims(self.expand_dims(trend), -1)
if axis == -1:
trend = np.swapaxes(trend, -2, -1)
obj = copy.deepcopy(self)
obj.triangle = obj.triangle*trend
return obj
def rename(self, axis, value):
if axis == 'index' or axis == 0:
self.index = value
if axis == 'columns' or axis == 1:
self.columns = value
if axis == 'origin' or axis == 2:
self.origin = value
if axis == 'development' or axis == 3:
self.development = value
return self
# ---------------------------------------------------------------- #
# ------------------------ Display Options ----------------------- #
# ---------------------------------------------------------------- #
def __repr__(self):
if (self.triangle.shape[0], self.triangle.shape[1]) == (1, 1):
data = self._repr_format()
return data.to_string()
else:
data = 'Valuation: ' + self.valuation_date.strftime('%Y-%m') + \
'\nGrain: ' + 'O' + self.origin_grain + \
'D' + self.development_grain + \
'\nShape: ' + str(self.shape) + \
'\nindex: ' + str(self.key_labels) + \
'\ncolumns: ' + str(list(self.vdims))
return data
def _repr_html_(self):
''' Jupyter/Ipython HTML representation '''
if (self.triangle.shape[0], self.triangle.shape[1]) == (1, 1):
data = self._repr_format()
if np.nanmean(abs(data)) < 10:
fmt_str = '{0:,.4f}'
elif np.nanmean(abs(data)) < 1000:
fmt_str = '{0:,.2f}'
else:
fmt_str = '{:,.0f}'
if len(self.ddims) > 1 and type(self.ddims[0]) is int:
data.columns = [['Development Lag'] * len(self.ddims),
self.ddims]
default = data.to_html(max_rows=pd.options.display.max_rows,
max_cols=pd.options.display.max_columns,
float_format=fmt_str.format) \
.replace('nan', '')
return default.replace(
f'<th></th>\n <th>{self.development.values[0][0]}</th>',
f'<th>Origin</th>\n <th>{self.development.values[0][0]}</th>')
else:
data = pd.Series([self.valuation_date.strftime('%Y-%m'),
'O' + self.origin_grain + 'D'
+ self.development_grain,
self.shape, self.key_labels, list(self.vdims)],
index=['Valuation:', 'Grain:', 'Shape',
'Index:', "Columns:"],
name='Triangle Summary').to_frame()
pd.options.display.precision = 0
return data.to_html(max_rows=pd.options.display.max_rows,
max_cols=pd.options.display.max_columns)
def _repr_format(self):
''' Flatten to 2D DataFrame '''
x = self.triangle[0, 0]
if type(self.odims[0]) == np.datetime64:
origin = pd.Series(self.odims).dt.to_period(self.origin_grain)
else:
origin = pd.Series(self.odims)
return pd.DataFrame(x, index=origin, columns=self.ddims)
# ---------------------------------------------------------------- #
# ----------------------- Pandas Passthrus ----------------------- #
# ---------------------------------------------------------------- #
def to_frame(self, *args, **kwargs):
""" Converts a triangle to a pandas.DataFrame. Requires an individual
index and column selection to appropriately grab the 2D DataFrame.
Returns
-------
pandas.DataFrame representation of the Triangle.
"""
axes = [num for num, item in enumerate(self.shape) if item > 1]
if self.shape[:2] == (1, 1):
return self._repr_format()
elif len(axes) == 2:
tri = np.squeeze(self.triangle)
axes_lookup = {0: self.kdims, 1: self.vdims,
2: self.odims, 3: self.ddims}
return pd.DataFrame(tri, index=axes_lookup[axes[0]],
columns=axes_lookup[axes[1]])
else:
raise ValueError('len(index) and len(columns) must be 1.')
def plot(self, *args, **kwargs):
""" Passthrough of pandas functionality """
return self.to_frame().plot(*args, **kwargs)
@property
def T(self):
""" Passthrough of pandas functionality """
return self.to_frame().T
# ---------------------------------------------------------------- #
# ---------------------- Arithmetic Overload --------------------- #
# ---------------------------------------------------------------- #
def _validate_arithmetic(self, obj, other):
other = copy.deepcopy(other)
ddims = None
odims = None
if type(other) not in [int, float, np.float64, np.int64]:
if len(self.vdims) != len(other.vdims):
                raise ValueError('Triangles must have the same number of '
                                 'columns')
            if len(self.kdims) != len(other.kdims):
                raise ValueError('Triangles must have the same number of '
                                 'index')
if len(self.vdims) == 1:
other.vdims = np.array([None])
# If broadcasting doesn't work, then try intersecting before
# failure
a, b = self.shape[-2:], other.shape[-2:]
if not (a[0] == 1 or b[0] == 1 or a[0] == b[0]) and \
not (a[1] == 1 or b[1] == 1 or a[1] == b[1]):
ddims = set(self.ddims).intersection(set(other.ddims))
odims = set(self.odims).intersection(set(other.odims))
# Need to set string vs int type-casting
obj = obj[obj.origin.isin(odims)][obj.development.isin(ddims)]
other = other[other.origin.isin(odims)][other.development.isin(ddims)]
obj.odims = np.sort(np.array(list(odims)))
obj.ddims = np.sort(np.array(list(ddims)))
other = other.triangle
return obj, other
# @check_triangle_postcondition
def __add__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(obj.triangle) + np.nan_to_num(other)
obj.triangle = obj.triangle * self.expand_dims(obj.nan_triangle())
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
# @check_triangle_postcondition
def __radd__(self, other):
return self if other == 0 else self.__add__(other)
# @check_triangle_postcondition
def __sub__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(obj.triangle) - \
np.nan_to_num(other)
obj.triangle = obj.triangle * self.expand_dims(obj.nan_triangle())
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
# @check_triangle_postcondition
def __rsub__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(other) - \
np.nan_to_num(obj.triangle)
obj.triangle = obj.triangle * self.expand_dims(obj.nan_triangle())
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
def __len__(self):
return self.shape[0]
# @check_triangle_postcondition
def __neg__(self):
obj = copy.deepcopy(self)
obj.triangle = -obj.triangle
return obj
# @check_triangle_postcondition
def __pos__(self):
return self
# @check_triangle_postcondition
def __mul__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(obj.triangle)*other
obj.triangle = obj.triangle * self.expand_dims(obj.nan_triangle())
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
# @check_triangle_postcondition
def __rmul__(self, other):
return self if other == 1 else self.__mul__(other)
# @check_triangle_postcondition
def __truediv__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(obj.triangle)/other
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
# @check_triangle_postcondition
def __rtruediv__(self, other):
obj = copy.deepcopy(self)
obj.triangle = other / self.triangle
obj.triangle[obj.triangle == 0] = np.nan
return obj
def __eq__(self, other):
if np.all(np.nan_to_num(self.triangle) ==
np.nan_to_num(other.triangle)):
return True
else:
return False
def quantile(self, q, *args, **kwargs):
if self.shape[:2] == (1, 1):
return self.to_frame().quantile(q, *args, **kwargs)
return _TriangleGroupBy(self, by=-1).quantile(q, axis=1)
def groupby(self, by, *args, **kwargs):
if self.shape[:2] == (1, 1):
return self.to_frame().groupby(*args, **kwargs)
return _TriangleGroupBy(self, by)
def idx_table_format(self, idx):
if type(idx) is pd.Series:
# One row or one column selection is it k or v?
if len(set(idx.index).intersection(set(self.vdims))) == len(idx):
# One column selection
idx = idx.to_frame().T
idx.index.names = self.key_labels
else:
# One row selection
idx = idx.to_frame()
elif type(idx) is tuple:
# Single cell selection
idx = self.idx_table().iloc[idx[0]:idx[0] + 1,
idx[1]:idx[1] + 1]
return idx
def idx_table(self):
idx = self.kdims
temp = pd.DataFrame(list(idx), columns=self.key_labels)
for num, item in enumerate(self.vdims):
temp[item] = list(zip(np.arange(len(temp)),
(np.ones(len(temp))*num).astype(int)))
temp.set_index(self.key_labels, inplace=True)
return temp
def __getitem__(self, key):
''' Function for pandas style column indexing'''
if type(key) is pd.DataFrame and 'development' in key.columns:
return self._slice_development(key['development'])
if type(key) is np.ndarray:
# Presumes that if I have a 1D array, I will want to slice origin.
if len(key) == self.shape[-2]*self.shape[-1] and self.shape[-1] > 1:
return self._slice_valuation(key)
return self._slice_origin(key)
if type(key) is pd.Series:
return self.iloc[list(self.index[key].index)]
if key in self.key_labels:
# Boolean-indexing of a particular key
return self.index[key]
idx = self.idx_table()[key]
idx = self.idx_table_format(idx)
return _LocBase(self).get_idx(idx)
def __setitem__(self, key, value):
''' Function for pandas style column indexing setting '''
idx = self.idx_table()
idx[key] = 1
self.vdims = np.array(idx.columns.unique())
self.triangle = np.append(self.triangle, value.triangle, axis=1)
# @check_triangle_postcondition
def append(self, obj, index):
return_obj = copy.deepcopy(self)
x = pd.DataFrame(list(return_obj.kdims), columns=return_obj.key_labels)
new_idx = pd.DataFrame([index], columns=return_obj.key_labels)
x = x.append(new_idx)
x.set_index(return_obj.key_labels, inplace=True)
return_obj.triangle = np.append(return_obj.triangle, obj.triangle,
axis=0)
return_obj.kdims = np.array(x.index.unique())
return return_obj
# @check_triangle_postcondition
def _slice_origin(self, key):
obj = copy.deepcopy(self)
obj.odims = obj.odims[key]
obj.triangle = obj.triangle[..., key, :]
return self._cleanup_slice(obj)
# @check_triangle_postcondition
def _slice_valuation(self, key):
obj = copy.deepcopy(self)
obj.valuation_date = obj.valuation[key].max()
key = key.reshape(self.shape[-2:], order='f')
nan_tri = np.ones(self.shape[-2:])
nan_tri = key*nan_tri
nan_tri[nan_tri == 0] = np.nan
o, d = nan_tri.shape
o_idx = np.arange(o)[list(np.sum(np.isnan(nan_tri), 1) != d)]
d_idx = np.arange(d)[list(np.sum(np.isnan(nan_tri), 0) != o)]
obj.odims = obj.odims[np.sum(np.isnan(nan_tri), 1) != d]
if len(obj.ddims) > 1:
obj.ddims = obj.ddims[np.sum(np.isnan(nan_tri), 0) != o]
obj.triangle = (obj.triangle*nan_tri)
obj.triangle = np.take(np.take(obj.triangle, o_idx, -2), d_idx, -1)
return self._cleanup_slice(obj)
# @check_triangle_postcondition
def _slice_development(self, key):
obj = copy.deepcopy(self)
obj.ddims = obj.ddims[key]
obj.triangle = obj.triangle[..., key]
return self._cleanup_slice(obj)
def _cleanup_slice(self, obj):
obj.valuation = obj._valuation_triangle()
if hasattr(obj, '_nan_triangle'):
# Force update on _nan_triangle at next access.
del obj._nan_triangle
obj._nan_triangle = obj.nan_triangle()
return obj
# ---------------------------------------------------------------- #
# ------------------- Data Ingestion Functions ------------------- #
# ---------------------------------------------------------------- #
def get_date_axes(self, origin_date, development_date):
''' Function to find any missing origin dates or development dates that
would otherwise mess up the origin/development dimensions.
'''
def complete_date_range(origin_date, development_date,
origin_grain, development_grain):
''' Determines origin/development combinations in full. Useful for
when the triangle has holes in it. '''
origin_unique = \
pd.period_range(start=origin_date.min(),
end=origin_date.max(),
freq=origin_grain).to_timestamp()
development_unique = \
pd.period_range(start=origin_date.min(),
end=development_date.max(),
freq=development_grain).to_timestamp()
development_unique = TriangleBase.period_end(development_unique)
# Let's get rid of any development periods before origin periods
cart_prod = TriangleBase.cartesian_product(origin_unique,
development_unique)
cart_prod = cart_prod[cart_prod[:, 0] <= cart_prod[:, 1], :]
return pd.DataFrame(cart_prod, columns=['origin', 'development'])
cart_prod_o = \
complete_date_range(pd.Series(origin_date.min()), development_date,
self.origin_grain, self.development_grain)
cart_prod_d = \
complete_date_range(origin_date, pd.Series(origin_date.max()),
self.origin_grain, self.development_grain)
cart_prod_t = pd.DataFrame({'origin': origin_date,
'development': development_date})
cart_prod = cart_prod_o.append(cart_prod_d) \
.append(cart_prod_t).drop_duplicates()
cart_prod = cart_prod[cart_prod['development'] >= cart_prod['origin']]
return cart_prod
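# Illustrative sketch (dates assumed, not from the source): for quarterly
# origins 2017Q1-2017Q2 and monthly development period-ends, the grid built
# above contains rows such as (2017-01-01, 2017-01-31),
# (2017-01-01, 2017-02-28), ..., with any pair whose development date
# precedes its origin filtered out by the final comparison.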
def get_axes(self, data_agg, groupby, columns,
origin_date, development_date):
''' Preps axes for the 4D triangle
'''
date_axes = self.get_date_axes(origin_date, development_date)
kdims = data_agg[groupby].drop_duplicates()
kdims['key'] = 1
date_axes['key'] = 1
all_axes = pd.merge(date_axes, kdims, on='key').drop('key', axis=1)
data_agg = \
all_axes.merge(data_agg, how='left',
left_on=['origin', 'development'] + groupby,
right_on=[origin_date, development_date] + groupby) \
.fillna(0)[['origin', 'development'] + groupby + columns]
data_agg['development'] = \
TriangleBase.development_lag(data_agg['origin'],
data_agg['development'])
return data_agg
# ---------------------------------------------------------------- #
# ------------------- Class Utility Functions -------------------- #
# ---------------------------------------------------------------- #
def nan_triangle(self):
'''Given the current triangle shape and grain, it determines the
appropriate placement of NANs in the triangle for future valuations.
This becomes useful when managing array arithmetic.
'''
if self.triangle.shape[2] == 1 or \
self.triangle.shape[3] == 1 or \
self.nan_override:
# This is reserved for summary arrays, e.g. LDF, Diagonal, etc
# and does not need nan overrides
return np.ones(self.triangle.shape[2:])
if len(self.valuation) != len(self.odims)*len(self.ddims) or not \
hasattr(self, '_nan_triangle'):
self.valuation = self._valuation_triangle()
val_array = self.valuation
val_array = val_array.values.reshape(self.shape[-2:], order='f')
nan_triangle = np.array(
pd.DataFrame(val_array) > self.valuation_date)
nan_triangle = np.where(nan_triangle, np.nan, 1)
self._nan_triangle = nan_triangle
return self._nan_triangle
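# Illustrative sketch (assumed 3x3 example): when the latest diagonal sits at
# self.valuation_date, the mask returned above looks like
#   [[ 1.,  1.,  1.],
#    [ 1.,  1., nan],
#    [ 1., nan, nan]]
# i.e. cells whose valuation date lies beyond the valuation date become NaN
# so that subsequent array arithmetic ignores future periods.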
def _valuation_triangle(self, ddims=None):
''' Given origin and development, develop a triangle of valuation
dates.
'''
ddims = self.ddims if ddims is None else ddims
if ddims[0] is None:
ddims = pd.Series([self.valuation_date]*len(self.origin))
return pd.DatetimeIndex(ddims.values)
special_cases = dict(Ultimate='2262-03-01', Latest=self.valuation_date)
if ddims[0] in special_cases.keys():
return pd.DatetimeIndex([pd.to_datetime(special_cases[ddims[0]])] *
len(self.origin))
if type(ddims[0]) is np.str_:
ddims = [int(item[:item.find('-'):]) for item in ddims]
origin = pd.PeriodIndex(self.odims, freq=self.origin_grain) \
.to_timestamp(how='s')
origin = pd.Series(origin)
# Limit origin to valuation date
origin[origin > self.valuation_date] = self.valuation_date
next_development = origin+pd.DateOffset(days=-1, months=ddims[0])
val_array = np.expand_dims(np.array(next_development), -1)
for item in ddims[1:]:
if item == 9999:
next_development = pd.Series([pd.to_datetime('2262-03-01')] *
len(origin))
next_development = np.expand_dims(np.array(
next_development), -1)
else:
next_development = np.expand_dims(
np.array(origin+pd.DateOffset(days=-1, months=item)), -1)
val_array = np.concatenate((val_array, next_development), -1)
return pd.DatetimeIndex(pd.DataFrame(val_array).unstack().values)
def _slide(self, triangle, direction='r'):
''' Facilitates swapping alignment of triangle between development
period and development date. '''
obj = copy.deepcopy(self)
obj.triangle = triangle
nan_tri = obj.nan_triangle()
r = (nan_tri.shape[1] - np.nansum(nan_tri, axis=1)).astype(int)
r = -r if direction == 'l' else r
k, v, rows, column_indices = \
np.ogrid[:triangle.shape[0], :triangle.shape[1],
:triangle.shape[2], :triangle.shape[3]]
r[r < 0] += nan_tri.shape[1]
column_indices = column_indices - r[:, np.newaxis]
return triangle[k, v, rows, column_indices]
def expand_dims(self, tri_2d):
'''Expands from one 2D triangle to full 4D object
'''
k = len(self.kdims)
v = len(self.vdims)
tri_3d = np.repeat(np.expand_dims(tri_2d, axis=0), v, axis=0)
return np.repeat(np.expand_dims(tri_3d, axis=0), k, axis=0)
# @check_triangle_postcondition
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
@staticmethod
def to_datetime(data, fields, period_end=False):
'''For tabular form, this will take a set of data
column(s) and return a single date array.
'''
# Concat everything into one field
if len(fields) > 1:
target_field = pd.Series(index=data.index).fillna('')
for item in fields:
target_field = target_field + data[item].astype(str)
else:
target_field = data[fields[0]]
# pandas is not good at inferring YYYYMM format so trying that first
# and if it fails, move on to how pandas infers things.
datetime_arg = target_field.unique()
date_inference_list = \
[{'arg': datetime_arg, 'format': '%Y%m'},
{'arg': datetime_arg, 'format': '%Y'},
{'arg': datetime_arg, 'infer_datetime_format': True}]
for item in date_inference_list:
try:
arr = dict(zip(datetime_arg, pd.to_datetime(**item)))
break
except:
pass
target = target_field.map(arr)
if period_end:
target = TriangleBase.period_end(target)
return target
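# Illustrative usage sketch (hypothetical column names):
#   TriangleBase.to_datetime(df, ['AccidentYear', 'AccidentMonth'])
# concatenates the columns into strings such as '201701' (assuming
# zero-padded months) and parses them with the '%Y%m' format tried above
# before falling back to pandas' own date inference.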
@staticmethod
def development_lag(origin, development):
''' For tabular format, this will convert the origin/development
difference to a development lag '''
year_diff = development.dt.year - origin.dt.year
if np.all(origin != development):
development_grain = TriangleBase.get_grain(development)
else:
development_grain = 'M'
if development_grain == 'Y':
return year_diff + 1
if development_grain == 'Q':
quarter_diff = development.dt.quarter - origin.dt.quarter
return year_diff * 4 + quarter_diff + 1
if development_grain == 'M':
month_diff = development.dt.month - origin.dt.month
return year_diff * 12 + month_diff + 1
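# Illustrative sketch (assumed dates): with origin 2017-01-01 and development
# 2017-03-31 at monthly grain, the lag is (2017-2017)*12 + (3-1) + 1 = 3,
# i.e. the third development period.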
@staticmethod
def period_end(array):
if type(array) is not pd.DatetimeIndex:
array_lookup = len(set(array.dt.month))
else:
array_lookup = len(set(array.month))
offset = {12: pd.tseries.offsets.MonthEnd()
import pandas as pd
from scoreware.race.utils import get_last_name
def parse_general(df, headers, id):
newdf=pd.DataFrame()
print((type(headers)))
for key in headers:
print((headers[key]))
for column in df.columns:
if column.lower() in headers[key]:
print((column.lower()+' matches'))
print(key)
print(key=='name')
#if (key=='time'):
# df[column]=df[column].replace('nan', method='bfill')
# df[column]=df[column].fillna(method='bfill')
# print((df[column].loc[1:10]))
# df[column]=df[column].astype(str)
# print((df[column].loc[230:240]))
# newdf['time']=df[column].apply(lambda x: '00:'+x.split(':')[0]+':'+x.split(':')[1])
if (key=='full_name'):
df[column]=df[column].fillna(value='none none')
df[column]=df[column].replace('nan', value='none none')
df[column]=df[column].astype(str)
newdf['first_name']=df[column].apply(lambda x: x.split()[0])
#newdf['last_name']=df[column].apply(lambda x: x.split()[-1])
newdf['last_name']=df[column].apply(lambda x: get_last_name(x))
print(newdf['last_name'])
else:
if (key=='age'):
df[column]=df[column].fillna(value=-1)
else:
df[column]=df[column].fillna(value='none')
newdf[key]=df[column]
newdf['race_id']=id
return newdf
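# Illustrative usage sketch (hypothetical header aliases): parse_general
# expects `headers` to map canonical field names to the lower-cased column
# names that may appear in a raw results sheet, e.g.
#   headers = {'full_name': ['name', 'runner'],
#              'age': ['age', 'division age'],
#              'time': ['time', 'chip time']}
#   newdf = parse_general(raw_df, headers, race_id)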
def parse_general2(df, headers, id):
newdf = pd.DataFrame()
# -*- coding: utf-8
"""Module for tespy network class.
The network is the container for every TESPy simulation. The network class
automatically creates the system of equations describing topology and
parametrisation of a specific model and solves it.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location tespy/networks/networks.py
SPDX-License-Identifier: MIT
"""
# reading .csv
import ast
# ordered dicts for fluid composition vector
from collections import Counter, OrderedDict
# calculation of molar masses and gas constants
from CoolProp.CoolProp import PropsSI as CPPSI
# logging messages
import logging
# numpy functions
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
# checking/creating folders
import os
# DataFrames for connections and components
import pandas as pd
# printing results
from tabulate import tabulate
from tespy import connections as con
from tespy.components.basics import (sink, source, subsystem_interface,
cycle_closer)
from tespy.components.combustion import combustion_chamber, combustion_engine
from tespy.components.heat_exchangers import heat_exchanger
from tespy.components.nodes import drum, merge, splitter
from tespy.components.reactors import water_electrolyzer
from tespy.tools import data_containers as dc
from tespy.tools import fluid_properties as fp
from tespy.tools import helpers as hlp
from tespy.tools.global_vars import molar_masses, gas_constants, err
# calculation time
from time import time
class network:
r"""
Class component is the base class of all TESPy components.
Parameters
----------
fluids : list
A list of all fluids within the network container.
m_unit : str
Specify the unit for mass flow: 'kg / s', 't / h'.
v_unit : str
Specify the unit for volumetric flow: 'm3 / s', 'm3 / h', 'l / s',
'l / h'.
p_unit : str
Specify the unit for pressure: 'Pa', 'psi', 'bar', 'MPa'.
h_unit : str
Specify the unit for enthalpy: 'J / kg', 'kJ / kg', 'MJ / kg'.
T_unit : str
Specify the unit for temperature: 'K', 'C', 'F'.
p_range : list
List with minimum and maximum values for pressure value range.
h_range : list
List with minimum and maximum values for enthalpy value range.
T_range : list
List with minimum and maximum values for temperature value range.
iterinfo : boolean
Print convergence progress to console.
Note
----
Unit specification is optional: If not specified the SI unit (first
element in above lists) will be applied!
Range specification is optional, too. The value range is used to stabilise
the newton algorithm. For more information see the "getting started" section
in the online-documentation.
Example
-------
Basic example for setting up a tespy.networks.network object. Specifying
the fluids is mandatory! Unit systems, fluid property range and iterinfo
are optional.
Standard value for iterinfo is True. This will print out convergence
progress to the console. You can stop the printouts by setting this
property to false.
>>> from tespy.networks import network
>>> fluid_list = ['water', 'air', 'R134a']
>>> mynetwork = network(fluids=fluid_list, p_unit='bar', T_unit='C')
>>> mynetwork.set_attr(p_range=[1, 10])
>>> type(mynetwork)
<class 'tespy.networks.networks.network'>
>>> mynetwork.set_attr(iterinfo=False)
>>> mynetwork.iterinfo
False
>>> mynetwork.set_attr(iterinfo=True)
>>> mynetwork.iterinfo
True
A simple network consisting of a source, a pipe and a sink. This example
shows how the printout parameter can be used. We specify
:code:`printout=False` for both connections, the pipe as well as the heat
bus. Therefore the :code:`.print_results()` method should not print any
results.
>>> from tespy.networks import network
>>> from tespy.components import source, sink, pipe
>>> from tespy.connections import connection, bus
>>> nw = network(['CH4'], T_unit='C', p_unit='bar', v_unit='m3 / s')
>>> so = source('source')
>>> si = sink('sink')
>>> p = pipe('pipe', Q=0, pr=0.95, printout=False)
>>> a = connection(so, 'out1', p, 'in1')
>>> b = connection(p, 'out1', si, 'in1')
>>> nw.add_conns(a, b)
>>> a.set_attr(fluid={'CH4': 1}, T=30, p=10, m=10, printout=False)
>>> b.set_attr(printout=False)
>>> b = bus('heat bus')
>>> b.add_comps({'c': p})
>>> nw.add_busses(b)
>>> b.set_attr(printout=False)
>>> nw.set_attr(iterinfo=False)
>>> nw.solve('design')
>>> nw.print_results()
"""
def __init__(self, fluids, **kwargs):
# initialisation of basic properties
self.checked = False
# in case of a design calculation after an offdesign calculation
self.redesign = False
# connection dataframe
self.conns = pd.DataFrame(columns=['s', 's_id', 't', 't_id'])
# list for busses
self.busses = OrderedDict()
# default design_path value
self.design_path = None
# fluid list and constants
if isinstance(fluids, list):
self.fluids = sorted(fluids)
else:
msg = ('Please provide a list containing the network\'s fluids on '
'creation.')
logging.error(msg)
raise TypeError(msg)
msg = 'Network fluids are: '
for f in self.fluids:
msg += f + ', '
if 'INCOMP::' in f:
# molar mass and gas constant not available for incompressibles
molar_masses[f] = 1
gas_constants[f] = 1
elif 'TESPy::' not in f:
# calculating molar masses/gas constants for network's fluids
# tespy_fluid molar mass/gas constant are added on lut creation
molar_masses[f] = CPPSI('M', f)
gas_constants[f] = CPPSI('GAS_CONSTANT', f)
msg = msg[:-2] + '.'
logging.debug(msg)
# initialise fluid property memorisation function for this network
fp.memorise.add_fluids(self.fluids)
# available unit systems
# mass flow
self.m = {
'kg / s': 1,
't / h': 3.6
}
# pressure
self.p = {
'Pa': 1,
'psi': 6.8948e3,
'bar': 1e5,
'MPa': 1e6
}
# enthalpy
self.h = {
'J / kg': 1,
'kJ / kg': 1e3,
'MJ / kg': 1e6
}
# temperature
self.T = {
'C': [273.15, 1],
'F': [459.67, 5 / 9],
'K': [0, 1]
}
# volumetric flow
self.v = {
'm3 / s': 1,
'l / s': 1e-3,
'm3 / h': 1 / 3600,
'l / h': 1 / 3.6
}
# SI unit specification
self.SI_units = {
'm': 'kg / s',
'p': 'Pa',
'h': 'J / kg',
'T': 'K',
'v': 'm3 / s'
}
# iterinfo
self.iterinfo = True
# standard unit set
self.m_unit = self.SI_units['m']
self.p_unit = self.SI_units['p']
self.h_unit = self.SI_units['h']
self.T_unit = self.SI_units['T']
self.v_unit = self.SI_units['v']
msg = ('Default unit specifications: '
'mass flow: ' + self.m_unit + ', ' +
'pressure: ' + self.p_unit + ', ' +
'enthalpy: ' + self.h_unit + ', ' +
'temperature: ' + self.T_unit + ', ' +
'volumetric flow: ' + self.v_unit + '.')
logging.debug(msg)
# generic value range
self.m_range_SI = np.array([-1e12, 1e12])
self.p_range_SI = np.array([2e2, 300e5])
self.h_range_SI = np.array([1e3, 7e6])
self.T_range_SI = np.array([273.16, 1773.15])
msg = ('Default mass flow limits, '
'min: ' + str(self.m_range_SI[0]) + ' ' + self.m_unit +
', max: ' + str(self.m_range_SI[1]) + ' ' + self.m_unit + ', ')
logging.debug(msg)
msg = ('Default pressure limits, '
'min: ' + str(self.p_range_SI[0]) + ' ' + self.p_unit +
', max: ' + str(self.p_range_SI[1]) + ' ' + self.p_unit + ', ')
logging.debug(msg)
msg = ('Default enthalpy limits, '
'min: ' + str(self.h_range_SI[0]) + ' ' + self.h_unit +
', max: ' + str(self.h_range_SI[1]) + ' ' + self.h_unit + ', ')
logging.debug(msg)
msg = ('Default temperature limits, '
'min: ' + str(self.T_range_SI[0]) + ' ' + self.T_unit +
', max: ' + str(self.T_range_SI[1]) + ' ' + self.T_unit + ', ')
logging.debug(msg)
self.set_attr(**kwargs)
def set_attr(self, **kwargs):
r"""
Set, resets or unsets attributes of a network.
Parameters
----------
m_unit : str
Specify the unit for mass flow: 'kg / s', 't / h'.
v_unit : str
Specify the unit for volumetric flow: 'm3 / s', 'm3 / h', 'l / s',
'l / h'.
p_unit : str
Specify the unit for pressure: 'Pa', 'psi', 'bar', 'MPa'.
h_unit : str
Specify the unit for enthalpy: 'J / kg', 'kJ / kg', 'MJ / kg'.
T_unit : str
Specify the unit for temperature: 'K', 'C', 'F'.
m_range : list
List with minimum and maximum values for mass flow value range.
p_range : list
List with minimum and maximum values for pressure value range.
h_range : list
List with minimum and maximum values for enthalpy value range.
T_range : list
List with minimum and maximum values for temperature value range.
iterinfo : boolean
Print convergence progress to console.
"""
# unit sets
if 'm_unit' in kwargs.keys():
if kwargs['m_unit'] not in self.m.keys():
msg = ('Allowed units for mass flow are: ' +
str(self.m.keys()))
logging.error(msg)
raise ValueError(msg)
else:
self.m_unit = kwargs['m_unit']
msg = 'Setting mass flow unit: ' + self.m_unit
logging.debug(msg)
if 'p_unit' in kwargs.keys():
if kwargs['p_unit'] not in self.p.keys():
msg = ('Allowed units for pressure are: ' + str(self.p.keys()))
logging.error(msg)
raise ValueError(msg)
else:
self.p_unit = kwargs['p_unit']
msg = 'Setting pressure unit: ' + self.p_unit
logging.debug(msg)
if 'h_unit' in kwargs.keys():
if kwargs['h_unit'] not in self.h.keys():
msg = ('Allowed units for enthalpy are: ' + str(self.h.keys()))
logging.error(msg)
raise ValueError(msg)
else:
self.h_unit = kwargs['h_unit']
msg = 'Setting enthalpy unit: ' + self.h_unit
logging.debug(msg)
if 'T_unit' in kwargs.keys():
if kwargs['T_unit'] not in self.T.keys():
msg = ('Allowed units for temperature are: ' + str(self.T.keys()))
logging.error(msg)
raise ValueError(msg)
else:
self.T_unit = kwargs['T_unit']
msg = 'Setting temperature unit: ' + self.T_unit
logging.debug(msg)
if 'v_unit' in kwargs.keys():
if kwargs['v_unit'] not in self.v.keys():
msg = ('Allowed units for volumetric flow are: ' + str(self.v.keys()))
logging.error(msg)
raise ValueError(msg)
else:
self.v_unit = kwargs['v_unit']
msg = 'Setting volumetric flow unit: ' + self.v_unit
logging.debug(msg)
# value ranges
if 'm_range' in kwargs.keys():
if not isinstance(kwargs['m_range'], list):
msg = ('Specify the value range as list: [m_min, m_max]')
logging.error(msg)
raise TypeError(msg)
else:
self.m_range_SI = (np.array(kwargs['m_range']) *
self.m[self.m_unit])
msg = ('Setting mass flow limits, min: ' +
str(self.m_range_SI[0]) + ' ' + self.SI_units['m'] +
', max: ' + str(self.m_range_SI[1]) + ' ' +
self.SI_units['m'] + '.')
logging.debug(msg)
if 'p_range' in kwargs.keys():
if not isinstance(kwargs['p_range'], list):
msg = ('Specify the value range as list: [p_min, p_max]')
logging.error(msg)
raise TypeError(msg)
else:
self.p_range_SI = (np.array(kwargs['p_range']) *
self.p[self.p_unit])
msg = ('Setting pressure limits, min: ' +
str(self.p_range_SI[0]) + ' ' + self.SI_units['p'] +
', max: ' + str(self.p_range_SI[1]) + ' ' +
self.SI_units['p'] + '.')
logging.debug(msg)
if 'h_range' in kwargs.keys():
if not isinstance(kwargs['h_range'], list):
msg = ('Specify the value range as list: [h_min, h_max]')
logging.error(msg)
raise TypeError(msg)
else:
self.h_range_SI = (np.array(kwargs['h_range']) *
self.h[self.h_unit])
msg = ('Setting enthalpy limits, min: ' +
str(self.h_range_SI[0]) + ' ' + self.SI_units['h'] +
', max: ' + str(self.h_range_SI[1]) + ' ' +
self.SI_units['h'] + '.')
logging.debug(msg)
if 'T_range' in kwargs.keys():
if not isinstance(kwargs['T_range'], list):
msg = ('Specify the value range as list: [T_min, T_max]')
logging.error(msg)
raise TypeError(msg)
else:
self.T_range_SI = ((np.array(kwargs['T_range']) +
self.T[self.T_unit][0]) *
self.T[self.T_unit][1])
msg = ('Setting temperature limits, min: ' +
str(self.T_range_SI[0]) + ' ' + self.SI_units['T'] +
', max: ' + str(self.T_range_SI[1]) + ' ' +
self.SI_units['T'] + '.')
logging.debug(msg)
# update non SI value ranges
self.m_range = self.m_range_SI / self.m[self.m_unit]
self.p_range = self.p_range_SI / self.p[self.p_unit]
self.h_range = self.h_range_SI / self.h[self.h_unit]
self.T_range = (self.T_range_SI / self.T[self.T_unit][1] -
self.T[self.T_unit][0])
for f in self.fluids:
if 'TESPy::' in f:
fp.memorise.vrange[f][0] = self.p_range_SI[0]
fp.memorise.vrange[f][1] = self.p_range_SI[1]
fp.memorise.vrange[f][2] = self.T_range_SI[0]
fp.memorise.vrange[f][3] = self.T_range_SI[1]
self.iterinfo = kwargs.get('iterinfo', self.iterinfo)
if not isinstance(self.iterinfo, bool):
msg = ('Network parameter iterinfo must be True or False!')
logging.error(msg)
raise TypeError(msg)
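# Usage sketch (mirrors the class docstring, values assumed):
#   nw.set_attr(p_unit='bar', T_unit='C', p_range=[1, 100], iterinfo=False)
# The given ranges are converted to SI internally and the memorised
# fluid-property ranges of any TESPy:: fluids are updated accordingly.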
def get_attr(self, key):
r"""
Get the value of a network attribute.
Parameters
----------
key : str
The attribute you want to retrieve.
Returns
-------
out :
Specified attribute.
"""
if key in self.__dict__:
return self.__dict__[key]
else:
msg = 'Network has no attribute \"' + str(key) + '\".'
logging.error(msg)
raise KeyError(msg)
def add_subsys(self, *args):
r"""
Add one or more subsystems to the network.
Parameters
----------
c : tespy.components.subsystems.subsystem
The subsystem to be added to the network, subsystem objects si
:code:`network.add_subsys(s1, s2, s3, ...)`.
"""
for subsys in args:
for c in subsys.conns.values():
self.add_conns(c)
def add_nwks(self, *args):
"""
Add connections from a different network.
:param args: network objects ni :code:`add_nwks(n1, n2, n3, ...)`
:type args: tespy.networks.network
:returns: no return value
"""
for nw in args:
for c in nw.conns.index:
self.add_conns(c)
def add_conns(self, *args):
r"""
Add one or more connections to the network.
Parameters
----------
c : tespy.connections.connection
The connection to be added to the network, connections objects ci
:code:`add_conns(c1, c2, c3, ...)`.
"""
for c in args:
if not isinstance(c, con.connection):
msg = ('Must provide tespy.connections.connection objects as '
'parameters.')
logging.error(msg)
raise TypeError(msg)
self.conns.loc[c] = [c.s, c.s_id, c.t, c.t_id]
msg = ('Added connection ' + c.s.label + ' (' + c.s_id + ') -> ' +
c.t.label + ' (' + c.t_id + ') to network.')
logging.debug(msg)
# set status "checked" to false, if conneciton is added to network.
self.checked = False
def del_conns(self, *args):
"""
Remove one or more connections from the network.
Parameters
----------
c : tespy.connections.connection
The connection to be removed from the network, connections objects
ci :code:`del_conns(c1, c2, c3, ...)`.
"""
for c in args:
self.conns = self.conns.drop(c)
msg = ('Deleted connection ' + c.s.label + ' (' + c.s_id +
') -> ' + c.t.label + ' (' + c.t_id + ') from network.')
logging.debug(msg)
# set status "checked" to false, if conneciton is deleted from network.
self.checked = False
def check_conns(self):
r"""Check connections for multiple usage of inlets or outlets."""
dub = self.conns.loc[self.conns.duplicated(['s', 's_id']) == True]
for c in dub.index:
targets = ''
for conns in self.conns[(self.conns.s == c.s) &
(self.conns.s_id == c.s_id)].index:
targets += conns.t.label + ' (' + conns.t_id + '); '
msg = ('The source ' + c.s.label + ' (' + c.s_id + ') is attached '
'to more than one target: ' + targets[:-2] + '. '
'Please check your network.')
logging.error(msg)
raise hlp.TESPyNetworkError(msg)
dub = self.conns.loc[self.conns.duplicated(['t', 't_id']) == True]
for c in dub.index:
sources = ''
for conns in self.conns[(self.conns.t == c.t) &
(self.conns.t_id == c.t_id)].index:
sources += conns.s.label + ' (' + conns.s_id + '); '
msg = ('The target ' + c.t.label + ' (' + c.t_id + ') is attached '
'to more than one source: ' + sources[:-2] + '. '
'Please check your network.')
logging.error(msg)
raise hlp.TESPyNetworkError(msg)
def add_busses(self, *args):
r"""
Add one or more busses to the network.
Parameters
----------
b : tespy.connections.bus
The bus to be added to the network, bus objects bi
:code:`add_busses(b1, b2, b3, ...)`.
"""
for b in args:
if self.check_busses(b):
self.busses[b.label] = b
msg = 'Added bus ' + b.label + ' to network.'
logging.debug(msg)
def del_busses(self, *args):
r"""
Remove one or more busses from the network.
Parameters
----------
b : tespy.connections.bus
The bus to be removed from the network, bus objects bi
:code:`del_busses(b1, b2, b3, ...)`.
"""
for b in args:
if b in self.busses.values():
del self.busses[b.label]
msg = 'Deleted bus ' + b.label + ' from network.'
logging.debug(msg)
def check_busses(self, b):
r"""
Checks the busses to be added for type, duplicates and identical labels.
Parameters
----------
b : tespy.connections.bus
The bus to be checked.
"""
if isinstance(b, con.bus):
if len(self.busses) > 0:
if b in self.busses.values():
msg = ('Network contains the bus ' + b.label + ' (' +
str(b) + ') already.')
logging.error(msg)
raise hlp.TESPyNetworkError(msg)
elif b.label in self.busses.keys():
msg = ('Network already has a bus with the name ' +
b.label + '.')
logging.error(msg)
raise hlp.TESPyNetworkError(msg)
else:
return True
else:
return True
else:
msg = 'Only objects of type bus are allowed in *args.'
logging.error(msg)
raise TypeError(msg)
def check_network(self):
r"""Check if components are connected properly within the network."""
self.check_conns()
# get unique components in connections dataframe
comps = pd.unique(self.conns[['s', 't']].values.ravel())
# build the dataframe for components
self.init_components(comps)
# count number of incoming and outgoing connections and compare to
# expected values
for comp in self.comps.index:
num_o = (self.conns[['s', 't']] == comp).sum().s
num_i = (self.conns[['s', 't']] == comp).sum().t
if num_o != comp.num_o:
msg = (comp.label + ' is missing ' + str(comp.num_o - num_o) +
' outgoing connections. Make sure all outlets are '
' connected and all connections have been added to the '
'network.')
logging.error(msg)
# raise an error in case network check is unsuccesful
raise hlp.TESPyNetworkError(msg)
elif num_i != comp.num_i:
msg = (comp.label + ' is missing ' + str(comp.num_i - num_i) +
' incoming connections. Make sure all inlets are '
' connected and all connections have been added to the '
'network.')
logging.error(msg)
# raise an error in case network check is unsuccesful
raise hlp.TESPyNetworkError(msg)
# network checked
self.checked = True
msg = 'Network check successful.'
logging.info(msg)
def init_components(self, comps):
r"""
Set up a dataframe for the network's components.
Additionally, check if all components have unique labels.
Note
----
The dataframe for the components is derived from the network's
connections. Thus it does not hold any additional information, the
dataframe is used to simplify the code, only.
"""
self.comps = pd.DataFrame(index=comps, columns=['i', 'o'])
labels = []
for comp in self.comps.index:
# get for incoming and outgoing connections of a component
s = self.conns[self.conns.s == comp]
s = s.s_id.sort_values().index
t = self.conns[self.conns.t == comp]
t = t.t_id.sort_values().index
self.comps.loc[comp] = [t, s]
# save the incoming and outgoing as well as the number of
# connections as component attribute
comp.inl = t.tolist()
comp.outl = s.tolist()
comp.num_i = len(comp.inlets())
comp.num_o = len(comp.outlets())
labels += [comp.label]
# save the connection locations to the components
comp.conn_loc = []
for c in comp.inl + comp.outl:
comp.conn_loc += [self.conns.index.get_loc(c)]
# check for duplicates in the component labels
if len(labels) != len(list(set(labels))):
duplicates = [item for item, count in
Counter(labels).items() if count > 1]
msg = ('All Components must have unique labels, duplicates are: '
+ str(duplicates) + '.')
logging.error(msg)
raise hlp.TESPyNetworkError(msg)
def initialise(self):
r"""
Initialise the network depending on calculation mode.
Design
- Generic fluid composition and fluid property initialisation.
- Starting values from initialisation path if provided.
Offdesign
- Check offdesign path specification.
- Set component and connection design point properties.
- Switch from design/offdesign parameter specification.
"""
if len(self.conns) == 0:
msg = ('No connections have been added to the network, please '
'make sure to add your connections with the '
'.add_conns() method.')
logging.error(msg)
raise hlp.TESPyNetworkError(msg)
if len(self.fluids) == 0:
msg = ('Network has no fluids, please specify a list with fluids '
'on network creation.')
logging.error(msg)
raise hlp.TESPyNetworkError(msg)
if self.mode == 'offdesign':
self.redesign = True
if self.design_path is None:
# must provide design_path
msg = ('Please provide \'design_path\' for every offdesign '
'calculation.')
logging.error(msg)
raise hlp.TESPyNetworkError(msg)
else:
# load design case
if self.new_design is True:
self.init_offdesign_params()
self.init_offdesign()
else:
# load design case
self.init_design()
# generic fluid initialisation
self.init_fluids()
# generic fluid property initialisation
self.init_properties()
# starting values from init path
if self.init_path is not None:
self.init_csv()
msg = 'Network initialised.'
logging.info(msg)
def init_design(self):
r"""
Initialise a design calculation.
Offdesign parameters are unset, design parameters are set. If
:code:`local_offdesign` is True for connections or components, the
design point information are read from the .csv-files in the respective
:code:`design_path`. In this case, the design values are unset, the
offdesign values set.
"""
# connections
for c in self.conns.index:
# read design point information of connections with
# local_offdesign activated from their respective design path
if c.local_offdesign is True:
if c.design_path is None:
msg = ('The parameter local_offdesign is True for the '
'connection ' + c.s.label + '(' + c.s_id + ') -> ' +
c.t.label + '(' + c.t_id + '), an individual '
'design_path must be specified in this case!')
logging.error(msg)
raise hlp.TESPyNetworkError(msg)
# unset design parameters
for var in c.design:
c.get_attr(var).set_attr(val_set=False)
# set offdesign parameters
for var in c.offdesign:
c.get_attr(var).set_attr(val_set=True)
# read design point information
path = hlp.modify_path_os(c.design_path + '/conn.csv')
msg = ('Reading individual design point information for '
'connection ' + c.s.label + '(' + c.s_id + ') -> ' +
c.t.label + '(' + c.t_id + ') from path ' +
path + '.')
logging.debug(msg)
df = pd.read_csv(path, index_col=0, delimiter=';', decimal='.')
# write data to connections
self.init_conn_design_params(c, df)
else:
# unset all design values
c.m.design = np.nan
c.p.design = np.nan
c.h.design = np.nan
c.fluid.design = OrderedDict()
c.new_design = True
# switch connections to design mode
if self.redesign is True:
for var in c.design:
c.get_attr(var).set_attr(val_set=True)
for var in c.offdesign:
c.get_attr(var).set_attr(val_set=False)
# unset design values for busses
for b in self.busses.values():
for cp in b.comps.index:
b.comps.loc[cp].P_ref = np.nan
series = pd.Series()
for cp in self.comps.index:
# read design point information of components with
# local_offdesign activated from their respective design path
if cp.local_offdesign is True:
if cp.design_path is not None:
# get type of component (class name)
c = cp.__class__.__name__
# read design point information
path = hlp.modify_path_os(cp.design_path + '/comps/' + c +
'.csv')
df = pd.read_csv(path, sep=';', decimal='.',
converters={
'busses': ast.literal_eval,
'bus_P_ref': ast.literal_eval
})
df.set_index('label', inplace=True)
# write data
self.init_comp_design_params(cp, df.loc[cp.label])
# unset design parameters
for var in cp.design:
cp.get_attr(var).set_attr(is_set=False)
# set offdesign parameters
switched = False
msg = 'Set component attributes '
for var in cp.offdesign:
# set variables provided in .offdesign attribute
data = cp.get_attr(var)
data.set_attr(is_set=True)
# take nominal values from design point
if isinstance(data, dc.dc_cp):
cp.get_attr(var).val = cp.get_attr(var).design
switched = True
msg += var + ', '
if switched:
msg = (msg[:-2] + ' to design value at component ' +
cp.label + '.')
logging.debug(msg)
cp.new_design = False
else:
# switch connections to design mode
if self.redesign is True:
for var in cp.design:
cp.get_attr(var).set_attr(is_set=True)
for var in cp.offdesign:
cp.get_attr(var).set_attr(is_set=False)
cp.set_parameters(self.mode, series)
# component initialisation
cp.comp_init(self)
def init_offdesign_params(self):
r"""
Read design point information from specified :code:`design_path`.
If a :code:`design_path` has been specified individually for components
or connections, the data will be read from the specified individual
path instead.
Note
----
The methods
:func:`tespy.networks.network.init_comp_design_params` (components) and
the :func:`tespy.networks.network.init_conn_design_params`
(connections) handle the parameter specification.
"""
# components without any parameters
not_required = ['source', 'sink', 'node', 'merge', 'splitter',
'separator', 'drum', 'subsystem_interface']
# fetch all components, reindex with label
cp_sort = self.comps.copy()
# get class name
cp_sort['cp'] = cp_sort.apply(network.get_class_base, axis=1)
cp_sort['label'] = cp_sort.apply(network.get_props, axis=1,
args=('label',))
cp_sort['comp'] = cp_sort.index
cp_sort.set_index('label', inplace=True)
# iter through unique types of components (class names)
for c in cp_sort.cp.unique():
if c not in not_required:
path = hlp.modify_path_os(self.design_path +
'/comps/' + c + '.csv')
msg = ('Reading design point information for components of '
'type ' + c + ' from path ' + path + '.')
logging.debug(msg)
# read data
df = pd.read_csv(path, sep=';', decimal='.',
converters={'busses': ast.literal_eval,
'bus_P_ref': ast.literal_eval})
df.set_index('label', inplace=True)
# iter through all components of this type and set data
for c_label in df.index:
comp = cp_sort.loc[c_label].comp
# read data of components with individual design_path
if comp.design_path is not None:
path_c = hlp.modify_path_os(comp.design_path +
'/comps/' + c + '.csv')
df_c = pd.read_csv(path_c, sep=';', decimal='.',
converters={
'busses': ast.literal_eval,
'bus_P_ref': ast.literal_eval
})
df_c.set_index('label', inplace=True)
data = df_c.loc[comp.label]
else:
data = df.loc[comp.label]
# write data to components
self.init_comp_design_params(comp, data)
msg = 'Read design point information for components.'
logging.debug(msg)
# read connection design point information
path = hlp.modify_path_os(self.design_path + '/conn.csv')
df = pd.read_csv(path, index_col=0, delimiter=';', decimal='.')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 19 21:14:42 2020
@author: hiroyasu
"""
import torch
import numpy as np
import matplotlib.pyplot as plt
import RNNtraining as rnnl
import CCMsampling as sccm
import pandas as pd
import pickle
dt = sccm.dt*0.1
tspan = sccm.tspan
#tspan = np.array([0,20])
s = sccm.s
b = sccm.b
r = sccm.r
ratio_p = sccm.ratio_p*1
C = sccm.C
B = sccm.B
D = sccm.D
dim = sccm.dim
d1_over = sccm.d1_over
d2_over = sccm.d2_over
b_over = sccm.b_over
c_over = sccm.c_over
d_over = sccm.d_over
RungeNum = sccm.RungeNum
RungeNum = 1
N = np.load('data/params/optimal/N.npy')
alp = np.load('data/params/optimal/alp.npy')
optvals = np.load('data/params/optimal/optvals.npy')
optval = np.mean(optvals)
n_input = rnnl.n_input
n_hidden = rnnl.n_hidden
n_output = rnnl.n_output
n_layers = rnnl.n_layers
class Lorenz:
def __init__(self,X,t,dt,runge_num=RungeNum):
self.X = X
self.t = t
self.dt = dt
self.runge_num = runge_num
self.h = dt/runge_num
def dynamics(self,state,time):
x1 = state[0]
x2 = state[1]
x3 = state[2]
dx1dt = s*(x2-x1);
dx2dt = x1*(r-x3)-x2;
dx3dt = x1*x2-b*x3;
return np.array([dx1dt,dx2dt,dx3dt])
def measurement(self):
Xv = (np.array([self.X])).T
d2 = (np.random.rand(1,1)*2-1)*ratio_p
y = C@Xv+D@d2
y = y[:,0]
return y
def rk4(self):
X = self.X
t = self.t
h = self.h
k1 = self.dynamics(X,t)
k2 = self.dynamics(X+k1*h/2.,t+h/2.)
k3 = self.dynamics(X+k2*h/2.,t+h/2.)
k4 = self.dynamics(X+k3*h,t+h)
return X+h*(k1+2.*k2+2.*k3+k4)/6., t+h
def one_step_sim(self):
runge_num = self.runge_num
for num in range(0, runge_num):  # updating self.X and self.t advances the whole state
self.X, self.t = self.rk4()
d1 = (np.random.rand(3,1)*2-1)*ratio_p
dist_dt = B@d1*self.dt
self.X = self.X+dist_dt[:,0]
return self.X, self.t
class Estimator:
def __init__(self,Z,t,dt,runge_num=RungeNum):
self.Z = Z
self.t = t
self.ns = Z.shape[0]
self.runge_num = runge_num
self.h = dt/runge_num
self.net = rnnl.RNN(n_input,n_hidden,n_output,n_layers)
self.net.load_state_dict(torch.load('data/trained_nets/RNNLorenz.pt'))
self.net.eval()
self.YC = np.load('data/trained_nets/Y_params.npy')
#self.net = torch.load('data/TrainedNets/NNLorenz.pt')
self.RNN = 1
def rk4(self,L,y):
Z = self.Z
t = self.t
h = self.h
k1 = self.estimator(Z,t,L,y)
k2 = self.estimator(Z+k1*h/2.,t+h/2.,L,y)
k3 = self.estimator(Z+k2*h/2.,t+h/2.,L,y)
k4 = self.estimator(Z+k3*h,t+h,L,y)
return Z+h*(k1+2.*k2+2.*k3+k4)/6.,t+h
def dynamics(self,state,time):
x1 = state[0]
x2 = state[1]
x3 = state[2]
dx1dt = s*(x2-x1);
dx2dt = x1*(r-x3)-x2;
dx3dt = x1*x2-b*x3;
return np.array([dx1dt,dx2dt,dx3dt])
def estimator(self,state,time,L,y):
yv = (np.array([y])).T
Zv = (np.array([state])).T
de = L@(yv-C@Zv)
de = de[:,0]
f = self.dynamics(state,time)
dZdt = f+de
return dZdt
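# The update above is a Luenberger-type observer,
#   dZ/dt = f(Z) + L (y - C Z),
# where L is the gain P @ C.T built from the learned metric P in GetL().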
def one_step_sim(self,y):
runge_num = self.runge_num
L = self.GetL()
P = self.GetP()
for num in range(0, runge_num):  # updating self.Z and self.t advances the whole estimator state
self.Z, self.t = self.rk4(L,y)
return self.Z, self.t,P
def GetP(self):
Znet = self.Np2Var(self.Z)
if self.RNN == 1:
cP = self.net(Znet.view(1,1,-1))
cP = cP.data.numpy()*self.YC
P = self.cP2P(cP[0,0,:])
else:
cP = self.net(Znet)
cP = cP.data.numpy()
P = self.cP2P(cP)
return P
def GetL(self):
P = self.GetP()
Ct = C.T
L = P@Ct
return L
def Np2Var(self,X):
X = X.astype(np.float32)
X = torch.from_numpy(X)
return X
def cP2P(self,cP):
cPnp = 0
for i in range(self.ns):
lb = i*(i+1)/2
lb = int(lb)
ub = (i+1)*(i+2)/2
ub = int(ub)
Di = cP[lb:ub]
Di = np.diag(Di,self.ns-(i+1))
cPnp += Di
P = (cPnp.T)@cPnp
return P
def M2cMvec(M):
cholM = np.linalg.cholesky(M)
cholM = cholM.T # upper triangular
ns = M.shape[0]
cM = np.zeros(int(ns*(ns+1)/2))
for ii in range(ns):
jj = (ns-1)-ii;
di = np.diag(cholM,jj)
cM[int(1/2*ii*(ii+1)):int(1/2*(ii+1)*(ii+2))] = di
return cM
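# Illustrative sketch (assumed 2x2 input): for M = [[4, 2], [2, 3]] the
# upper-triangular Cholesky factor is [[2, 1], [0, sqrt(2)]], and the packed
# vector stacks its diagonals from the outermost superdiagonal down to the
# main diagonal:
#   M2cMvec(np.array([[4., 2.], [2., 3.]])) -> array([1., 2., 1.4142...])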
def SaveDict(filename,var):
output = open(filename,'wb')
pickle.dump(var,output)
output.close()
pass
def LoadDict(filename):
pkl_file = open(filename,'rb')
varout = pickle.load(pkl_file)
pkl_file.close()
return varout
if __name__ == "__main__":
np.random.seed(seed=5)
numX0 = 10
WindowSize = 50
X0s = (np.random.rand(numX0,3)*2-1)*10
Z0s = (np.random.rand(numX0,3)*2-1)*10
ccm = sccm.CCM(X0s,tspan,0.1,1,np.array([alp]))
X0 = np.array([1,2,0])
Z0 = np.array([10,2,0])
net = rnnl.RNN(n_input,n_hidden,n_output,n_layers)
net.load_state_dict(torch.load('data/trained_nets/RNNLorenz.pt'))
net.eval()
YC = np.load('data/trained_nets/Y_params.npy')
t0 = tspan[0]
tf = tspan[1]
N = np.floor((tf-t0)/dt+1)
N = N.astype(np.int32)
dcMMs = {}
for iX0 in range(numX0):
est = Estimator(Z0,t0,dt)
lor = Lorenz(X0,t0,dt)
Xhis01 = X0
Z1his01 = Z0
cholP1 = {}
t = 0
j = 0
cholP1[0] = M2cMvec(est.GetP())/YC
for i in range(N):
y = lor.measurement()
X,t = lor.one_step_sim()
Z1,t1,P1 = est.one_step_sim(y)
if np.remainder(i+1,10) == 0:
cholP1[j+1] = M2cMvec(est.GetP())/YC
Xhis01 = np.vstack((Xhis01,X))
Z1his01 = np.vstack((Z1his01,Z1))
j += 1
cvx_status,cvx_optval,tt,XX,WWout,chi,nu = ccm.CCMXhis(Z1his01,alp)
dcP = []
cholMM = {}
dcMM = []
for i in range(len(WWout)-1):
Mi = np.linalg.inv(WWout[i])
cMi = M2cMvec(Mi)/YC
cholMM[i] = cMi
dcMM.append((np.linalg.norm(cholMM[i]-cholP1[i]))**2)
dcMMs[iX0] = dcMM
fig = plt.figure()
for iX0 in range(numX0):
dcMM = dcMMs[iX0]
plt.plot(np.linspace(0,50,500),dcMM)
plt.xlabel('t')
plt.ylabel('Error')
plt.show()
fig = plt.figure()
for iX0 in range(numX0):
dcMM = dcMMs[iX0]
datadM = {'score': dcMM}
dfdM = pd.DataFrame(datadM)
import numpy as np
import pandas as pd
from pysight.nd_hist_generator.tag_lens import (
TagPeriodVerifier,
TagPhaseAllocator,
TagPipeline,
numba_digitize,
)
class TestTagPipeline:
"""
Tests for TAG analysis functions
"""
tag_data = pd.Series(np.arange(0, 200 * 6530, 6530))
photons = pd.DataFrame([10, 100, 1000], columns=["abs_time"])
def_pipe = TagPipeline(photons=photons, tag_pulses=tag_data)
def test_preservation(self):
photons = pd.DataFrame([0, 10, 6531], columns=["abs_time"])
pipe = TagPipeline(photons=photons, tag_pulses=self.tag_data)
returned = pd.Series([0, 6530])
assert np.array_equal(
returned, pipe._TagPipeline__preserve_relevant_tag_pulses()
)
def test_preservation_without_zero(self):
photons = pd.DataFrame([10, 6531], columns=["abs_time"])
pipe = TagPipeline(photons=photons, tag_pulses=self.tag_data)
returned = pd.Series([6530])
assert np.array_equal(
returned, pipe._TagPipeline__preserve_relevant_tag_pulses()
)
class TestTagPeriodVerifier:
""" Test the Verifier class """
tag_data = pd.Series(np.arange(0, 200 * 6530, 6530))
freq = 189e3
binwidth = 800e-12
def_verifier = TagPeriodVerifier(
tag=tag_data, freq=freq, binwidth=binwidth, last_photon=np.uint64(200 * 6530)
)
def test_bins_bet_pulses(self):
assert 6614 == self.def_verifier.period
def test_allowed_noise(self):
assert 331 == self.def_verifier.allowed_noise
def test_start_end_no_issues(self):
tag_data = pd.Series(np.arange(0, 100, 10))
freq = 0.1
binwidth = 1.0
verifier = TagPeriodVerifier(
tag=tag_data, freq=freq, binwidth=binwidth, last_photon=np.uint64(100)
)
ret_start, ret_end = verifier._TagPeriodVerifier__obtain_start_end_idx()
my_start = np.array([], dtype=np.int64)
my_end = np.array([], dtype=np.int64)
assert np.array_equal(my_start, ret_start)
assert np.array_equal(my_end, ret_end)
def test_start_end_no_zero(self):
tag_data = pd.Series(np.arange(0, 300, 10))
tag_data.drop([0, 5, 6], inplace=True)
tag_data = tag_data.append(pd.Series([3, 9, 25]))
tag_data = tag_data.sort_values().reset_index(drop=True)
freq = 0.1
binwidth = 1.0
verifier = TagPeriodVerifier(
tag=tag_data, freq=freq, binwidth=binwidth, last_photon=np.uint64(300)
)
ret_start, ret_end = verifier._TagPeriodVerifier__obtain_start_end_idx()
my_start = [0, 3, 6]
my_end = [2, 5, 7]
assert list(ret_start) == my_start
assert list(ret_end) == my_end
def test_start_end_adding_zero(self):
tag_data = pd.Series(np.arange(5, 300, 10))
tag_data.drop([1, 5, 7, 8], inplace=True)
tag_data = tag_data.append(pd.Series([9, 27, 29, 31]))
tag_data = tag_data.sort_values().reset_index(drop=True)
freq = 0.1
binwidth = 1.0
verifier = TagPeriodVerifier(
tag=tag_data, freq=freq, binwidth=binwidth, last_photon=np.uint64(300)
)
ret_start, ret_end = verifier._TagPeriodVerifier__obtain_start_end_idx()
my_start = [0, 7]
my_end = [6, 9]
assert list(ret_start) == my_start
assert list(ret_end) == my_end
def test_fix_tag_pulses_adding_zero(self):
tag_data = pd.Series(np.arange(0, 100, 10))
tag_data.drop([0, 5, 6], inplace=True)
tag_data = tag_data.append(pd.Series([3, 9, 25]))
tag_data = tag_data.sort_values().reset_index(drop=True)
freq = 0.1
binwidth = 1.0
verifier = TagPeriodVerifier(
tag=tag_data, freq=freq, binwidth=binwidth, last_photon=np.uint64(100)
)
my_start = [0, 3, 6]
my_end = [2, 5, 7]
verifier._TagPeriodVerifier__fix_tag_pulses(starts=my_start, ends=my_end)
assert np.array_equal(verifier.tag.values, np.arange(0, 100, 10))
def test_fix_tag_pulses_no_zero_end_missing(self):
tag_data = pd.Series(np.arange(5, 95, 10, dtype=np.uint64))
tag_data.drop([1, 5, 7, 8], inplace=True)
tag_data = tag_data.append(pd.Series([9, 27, 29, 31], dtype=np.uint64))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import datetime as dt
import numpy as np
from collections import OrderedDict
import os
import pickle
from errorplots import ErrorPlots
class ErrorAnalysis(object):
""" Reads log and output files to analyze errors"""
def __init__(self, train_log_file=None, pred_file=None, period=1, output_field=3):
""" Instantiates the class with the log file and prediction output file
period : prediction period i.e how far out are the predictions in years (1,2,3 etc)
output_field : column to grab in the output file. EBIT is 3
"""
self.train_log_file = train_log_file
self.pred_file = pred_file
self.period = period
self.output_field = output_field
return
def read_train_log(self):
""" Returns mse data from training log file
mse is an ordered dict with epoch as key and (train_mse,validation_mse) as value
"""
if self.train_log_file is None:
print("train log file not provided")
return
mse_data = OrderedDict()
# Iterate through the file
with open(self.train_log_file) as f:
lines = f.readlines()
for line in lines:
line = line.split(' ')
if line[0] == 'Epoch:':
epoch = int(line[1])
train_mse = float(line[4])
valid_mse = float(line[7])
# Add to the mse dict
mse_data[epoch] = (train_mse, valid_mse)
return mse_data
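# Assumed log-line format implied by the indices used above (hedged, not
# taken from an actual log file):
#   Epoch: 12 ... train mse = 0.034 ... valid mse = 0.041
# i.e. fields 1, 4 and 7 of the space-split line hold the epoch, training
# MSE and validation MSE respectively.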
def read_predictions(self):
""" Returns a dict of companies with output and target values# Structure of companies dict
companies : {
gvkey:
period: {
output : { date: output }
target : { date: target}
}
"""
if self.pred_file is None:
print('Predictions file not provided')
return
else:
print('Reading '+self.pred_file)
# initialize the dicts
companies={}
with open(self.pred_file, 'rb') as f:
lines = f.readlines()
for i, line in enumerate(lines):
row = line.split(' ')
try:
date = dt.datetime.strptime(str(row[0]), "%Y%m")
mse_val = float(row[-1].split('=')[-1])
cur_output = float(lines[i + 6].split(' ')[self.output_field])
cur_target = float(lines[i + 7].split(' ')[self.output_field])
if np.isnan(cur_target):
cur_target = 0.0
gvkey = row[1]
try:
companies[gvkey][self.period]['output'][date] = cur_output
companies[gvkey][self.period]['target'][date] = cur_target
companies[gvkey][self.period]['mse'][date] = mse_val
except KeyError:
companies[gvkey] = {}
companies[gvkey][self.period] = {}
companies[gvkey][self.period]['output'] = {date: cur_output}
companies[gvkey][self.period]['target'] = {date: cur_target}
companies[gvkey][self.period]['mse'] = {date: mse_val}
except (ValueError, IndexError):
pass
return companies
def get_errors(self, save_csv=False, rel_err_filename='rel_error.csv',mse_err_filename='mse_error.csv'):
""" Returns a dataframe of relative errors where rows are dates and columns are companies
INPUTS
companies: dict returned from read_predictions method
"""
# Read the predictions files to generate company errors
companies = self.read_predictions()
pickle.dump(companies,open('companies.pkl','wb'))
# Initialize dict
rel_err = {}
mse_err = {}
print("Processing Errors...")
for i, key in enumerate(sorted(companies)):
# print(key)
try:
company = companies[key]
p1 = company[1]
out_p1 = sorted(p1['output'].items())
tar_p1 = sorted(p1['target'].items())
mse_p1 = sorted(p1['mse'].items())
x1, y1 = zip(*out_p1)
xt1, yt1 = zip(*tar_p1)
x_mse_1,y_mse_1 = zip(*mse_p1)
rel_err[key] = abs(np.divide(np.array(y1) - np.array(yt1), np.array(yt1)))
mse_err[key] = np.array(y_mse_1)
df_tmp = pd.DataFrame(data=rel_err[key], index=x1, columns=[key])
#!/usr/bin/env python
"""
# Author: <NAME>
# Created Time : Sat 28 Apr 2018 08:31:29 PM CST
# File Name: SCALE.py
# Description: Single-Cell ATAC-seq Analysis via Latent feature Extraction.
Input:
scATAC-seq data
Output:
1. latent feature
2. cluster assignment
3. imputation data
"""
import time
import torch
import numpy as np
import pandas as pd
import os
import argparse
from scale import SCALE
from scale.dataset import SingleCellDataset
from scale.utils import read_labels, cluster_report, estimate_k, binarization
from scale.plot import plot_embedding
from sklearn.preprocessing import MaxAbsScaler
from torch.utils.data import DataLoader
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='SCALE: Single-Cell ATAC-seq Analysis via Latent feature Extraction')
parser.add_argument('--dataset', '-d', type=str, help='input dataset path')
parser.add_argument('--n_centroids', '-k', type=int, help='cluster number')
parser.add_argument('--outdir', '-o', type=str, default='output/', help='Output path')
parser.add_argument('--verbose', action='store_true', help='Print loss of training process')
parser.add_argument('--pretrain', type=str, default=None, help='Load the trained model')
parser.add_argument('--lr', type=float, default=0.002, help='Learning rate')
parser.add_argument('--batch_size', '-b', type=int, default=32, help='Batch size')
parser.add_argument('--gpu', '-g', default=0, type=int, help='Select gpu device number when training')
parser.add_argument('--seed', type=int, default=18, help='Random seed for repeat results')
parser.add_argument('--encode_dim', type=int, nargs='*', default=[1024, 128], help='encoder structure')
parser.add_argument('--decode_dim', type=int, nargs='*', default=[], help='encoder structure')
parser.add_argument('--latent', '-l',type=int, default=10, help='latent layer dim')
parser.add_argument('--low', '-x', type=float, default=0.01, help='Remove low ratio peaks')
parser.add_argument('--high', type=float, default=0.9, help='Remove high ratio peaks')
parser.add_argument('--min_peaks', type=float, default=100, help='Remove low quality cells with few peaks')
parser.add_argument('--log_transform', action='store_true', help='Perform log2(x+1) transform')
parser.add_argument('--max_iter', '-i', type=int, default=30000, help='Max iteration')
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--impute', action='store_true', help='Save the imputed data')
parser.add_argument('--binary', action='store_true', help='Save binary imputed data')
parser.add_argument('--no_tsne', action='store_true', help='Not save the tsne embedding')
parser.add_argument('--reference', '-r', default=None, type=str, help='Reference celltypes')
parser.add_argument('--transpose', '-t', action='store_true', help='Transpose the input matrix')
args = parser.parse_args()
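# Example invocation (flags taken from the parser above, paths assumed):
#   python SCALE.py -d data/atac_matrix.txt -k 8 -o output/ --impute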
# Set random seed
seed = args.seed
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available(): # cuda device
device='cuda'
torch.cuda.set_device(args.gpu)
else:
device='cpu'
batch_size = args.batch_size
normalizer = MaxAbsScaler()
dataset = SingleCellDataset(args.dataset, low=args.low, high=args.high, min_peaks=args.min_peaks,
transpose=args.transpose, transforms=[normalizer.fit_transform])
trainloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)
testloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=False)
cell_num = dataset.shape[0]
input_dim = dataset.shape[1]
if args.n_centroids is None:
k = min(estimate_k(dataset.data.T), 30)
print('Estimate k {}'.format(k))
else:
k = args.n_centroids
lr = args.lr
name = args.dataset.strip('/').split('/')[-1]
args.min_peaks = int(args.min_peaks) if args.min_peaks >= 1 else args.min_peaks
outdir = args.outdir
if not os.path.exists(outdir):
os.makedirs(outdir)
print("\n**********************************************************************")
print(" SCALE: Single-Cell ATAC-seq Analysis via Latent feature Extraction")
print("**********************************************************************\n")
print("======== Parameters ========")
print('Cell number: {}\nPeak number: {}\nn_centroids: {}\nmax_iter: {}\nbatch_size: {}\ncell filter by peaks: {}\nrare peak filter: {}\ncommon peak filter: {}'.format(
cell_num, input_dim, k, args.max_iter, batch_size, args.min_peaks, args.low, args.high))
print("============================")
dims = [input_dim, args.latent, args.encode_dim, args.decode_dim]
model = SCALE(dims, n_centroids=k)
# print(model)
if not args.pretrain:
print('\n## Training Model ##')
model.init_gmm_params(testloader)
model.fit(trainloader,
lr=lr,
weight_decay=args.weight_decay,
verbose=args.verbose,
device = device,
max_iter=args.max_iter,
name=name,
outdir=outdir
)
# torch.save(model.to('cpu').state_dict(), os.path.join(outdir, 'model.pt')) # save model
else:
print('\n## Loading Model: {}\n'.format(args.pretrain))
model.load_model(args.pretrain)
model.to(device)
### output ###
print('outdir: {}'.format(outdir))
# 1. latent feature
feature = model.encodeBatch(testloader, device=device, out='z')
pd.DataFrame(feature)
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx)
expected = IntervalIndex.from_breaks(idx.values)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self):
index = self.index
assert len(index) == 2
assert index.size == 2
assert index.shape == (2, )
tm.assert_index_equal(index.left, Index([0, 1]))
tm.assert_index_equal(index.right, Index([1, 2]))
tm.assert_index_equal(index.mid, Index([0.5, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.index_with_nan
assert len(index) == 3
assert index.size == 3
assert index.shape == (3, )
tm.assert_index_equal(index.left, Index([0, np.nan, 1]))
tm.assert_index_equal(index.right, Index([1, np.nan, 2]))
tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), np.nan,
Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self):
index = self.index
assert not index.hasnans
tm.assert_numpy_array_equal(index.isna(),
np.array([False, False]))
tm.assert_numpy_array_equal(index.notna(),
np.array([True, True]))
index = self.index_with_nan
assert index.hasnans
tm.assert_numpy_array_equal(index.notna(),
np.array([True, False, True]))
tm.assert_numpy_array_equal(index.isna(),
np.array([False, True, False]))
def test_copy(self):
actual = self.index.copy()
assert actual.equals(self.index)
actual = self.index.copy(deep=True)
assert actual.equals(self.index)
assert actual.left is not self.index.left
def test_ensure_copied_data(self):
# exercise the copy flag in the constructor
# not copying
index = self.index
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self):
idx = self.index
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert not idx.equals(idx.astype(object))
assert not idx.equals(np.array(idx))
assert not idx.equals(list(idx))
assert not idx.equals([1, 2])
assert not idx.equals(np.array([1, 2]))
assert not idx.equals(pd.date_range('20130101', periods=2))
def test_astype(self):
idx = self.index
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_where(self):
expected = self.index
result = self.index.where(self.index.notna())
tm.assert_index_equal(result, expected)
idx = IntervalIndex.from_breaks([1, 2])
result = idx.where([True, False])
expected = IntervalIndex.from_intervals(
[Interval(1.0, 2.0, closed='right'), np.nan])
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
pass
def test_delete(self):
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.delete(0)
assert expected.equals(actual)
def test_insert(self):
expected = IntervalIndex.from_breaks(range(4))
actual = self.index.insert(2, Interval(2, 3))
assert expected.equals(actual)
pytest.raises(ValueError, self.index.insert, 0, 1)
pytest.raises(ValueError, self.index.insert, 0,
Interval(2, 3, closed='left'))
def test_take(self):
actual = self.index.take([0, 1])
assert self.index.equals(actual)
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2])
actual = self.index.take([0, 0, 1])
assert expected.equals(actual)
def test_monotonic_and_unique(self):
assert self.index.is_monotonic
assert self.index.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)])
assert idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 2)])
assert not idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 2), (0, 2)])
assert not idx.is_unique
assert idx.is_monotonic
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed='right')
assert i[0] == Interval(0.0, 1.0)
assert i[1] == Interval(1.0, 2.0)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right')
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right')
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed='right')
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
        index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
        index = IntervalIndex.from_breaks(breaks, closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
        index = IntervalIndex.from_breaks(breaks, closed='both')
assert index.slice_locs(1, 1) == (0, 2)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = | IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)]) | pandas.IntervalIndex.from_tuples |
#Copyright (c) Facebook, Inc. and its affiliates.
#This source code is licensed under the MIT license found in the
#LICENSE file in the root directory of this source tree.
import itertools
import copy
import csv
import os
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import shutil
from settings import config_plotting
import time
def get_column_name_number(dir_addr, mode):
column_name_number_dic = {}
try:
if mode == "all":
file_name = "result_summary/FARSI_simple_run_0_1_all_reults.csv"
else:
file_name = "result_summary/FARSI_simple_run_0_1.csv"
file_full_addr = os.path.join(dir_addr, file_name)
with open(file_full_addr) as f:
resultReader = csv.reader(f, delimiter=',', quotechar='|')
for row in resultReader:
for idx, el_name in enumerate(row):
column_name_number_dic[el_name] = idx
break
return column_name_number_dic
except Exception as e:
raise e
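# Minimal usage sketch for get_column_name_number (the run directory below is
# hypothetical): the returned dict maps a CSV header name to its column index,
# which the plotting helpers in this file use to pull values out of each row.
#
#   all_res_column_name_number = get_column_name_number("./data_collection/run_0", "all")
#   move_col = all_res_column_name_number["move name"]  # index of the "move name" column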
#
# the function to get the column information of the given category
def columnNum(dirName, fileName, cate, result):
if result == "all":
with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
for i, row in enumerate(resultReader):
if i == 0:
for j in range(0, len(row)):
if row[j] == cate:
return j
raise Exception("No such category in the list! Check the name: " + cate)
break
elif result == "simple":
with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1.csv", newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
for i, row in enumerate(resultReader):
if i == 0:
for j in range(0, len(row)):
if row[j] == cate:
return j
raise Exception("No such category in the list! Check the name: " + cate)
break
else:
raise Exception("No such result file! Check the result type! It should be either \"all\" or \"simple\"")
# the function to plot the frequency of all comm_comp in the pie chart
def plotCommCompAll(dirName, fileName, all_res_column_name_number):
colNum = all_res_column_name_number["comm_comp"]
    trueNum = all_res_column_name_number["move validity"]
with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
commNum = 0
compNum = 0
for i, row in enumerate(resultReader):
if row[trueNum] != "True":
continue
if i > 1:
if row[colNum] == "comm":
commNum += 1
elif row[colNum] == "comp":
compNum += 1
else:
raise Exception("comm_comp is not giving comm or comp! The new type: " + row[colNum])
plt.figure()
plt.pie([commNum, compNum], labels = ["comm", "comp"])
plt.title("comm_comp: Frequency")
plt.savefig(dirName + fileName + "/comm-compFreq-" + fileName + ".png")
# plt.show()
plt.close('all')
# the function to plot the frequency of all high level optimizations in the pie chart
def plothighLevelOptAll(dirName, fileName, all_res_column_name_number):
colNum = all_res_column_name_number["high level optimization name"]
    trueNum = all_res_column_name_number["move validity"]
with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
topoNum = 0
tunNum = 0
mapNum = 0
idenOptNum = 0
for i, row in enumerate(resultReader):
if row[trueNum] != "True":
continue
if i > 1:
if row[colNum] == "topology":
topoNum += 1
elif row[colNum] == "customization":
tunNum += 1
elif row[colNum] == "mapping":
mapNum += 1
elif row[colNum] == "identity":
idenOptNum += 1
else:
raise Exception("high level optimization name is not giving topology or customization or mapping or identity! The new type: " + row[colNum])
plt.figure()
plt.pie([topoNum, tunNum, mapNum, idenOptNum], labels = ["topology", "customization", "mapping", "identity"])
plt.title("High Level Optimization: Frequency")
plt.savefig(dirName + fileName + "/highLevelOpt-" + fileName + ".png")
# plt.show()
plt.close('all')
# the function to plot the frequency of all architectural variables to improve in the pie chart
def plotArchVarImpAll(dirName, fileName, colNum, trueNum):
with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
parazNum = 0
custNum = 0
localNum = 0
idenImpNum = 0
for i, row in enumerate(resultReader):
if row[trueNum] != "True":
continue
if i > 1:
if row[colNum] == "parallelization":
parazNum += 1
elif row[colNum] == "customization":
custNum += 1
elif row[colNum] == "locality":
localNum += 1
elif row[colNum] == "identity":
idenImpNum += 1
else:
raise Exception("architectural principle is not parallelization or customization or locality or identity! The new type: " + row[colNum])
plt.figure()
plt.pie([parazNum, custNum, localNum, idenImpNum], labels = ["parallelization", "customization", "locality", "identity"])
plt.title("Architectural Principle: Frequency")
plt.savefig(dirName + fileName + "/archVarImp-" + fileName + ".png")
# plt.show()
plt.close('all')
# the function to plot simulation time vs. system block count
def plotSimTimeVSblk(dirName, fileName, blkColNum, simColNum, trueNum):
with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
sysBlkCount = []
simTime = []
for i, row in enumerate(resultReader):
if row[trueNum] != "True":
continue
if i > 1:
sysBlkCount.append(int(row[blkColNum]))
simTime.append(float(row[simColNum]))
plt.figure()
plt.plot(sysBlkCount, simTime)
plt.xlabel("System Block Count")
plt.ylabel("Simulation Time")
plt.title("Simulation Time vs. Sytem Block Count")
plt.savefig(dirName + fileName + "/simTimeVSblk-" + fileName + ".png")
# plt.show()
plt.close('all')
# the function to plot move generation time vs. system block count
def plotMoveGenTimeVSblk(dirName, fileName, blkColNum, movColNum, trueNum):
with open(dirName + fileName + "/result_summary/FARSI_simple_run_0_1_all_reults.csv", newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
sysBlkCount = []
moveGenTime = []
for i, row in enumerate(resultReader):
if row[trueNum] != "True":
continue
if i > 1:
sysBlkCount.append(int(row[blkColNum]))
moveGenTime.append(float(row[movColNum]))
plt.figure()
plt.plot(sysBlkCount, moveGenTime)
plt.xlabel("System Block Count")
plt.ylabel("Move Generation Time")
plt.title("Move Generation Time vs. System Block Count")
plt.savefig(dirName + fileName + "/moveGenTimeVSblk-" + fileName + ".png")
# plt.show()
plt.close('all')
def get_experiments_workload(all_res_column_name):
    latency_budget = all_res_column_name["latency budget"][:-1]
    workload_latencies = latency_budget.split(";")
    workloads = []
    for workload_latency in workload_latencies:
        workloads.append(workload_latency.split("=")[0])
    return workloads
def get_experiments_name(file_full_addr, all_res_column_name_number):
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
row1 = next(resultReader)
row2 = next(resultReader)
latency_budget = row2[all_res_column_name_number["latency_budget"]]
power_budget = row2[all_res_column_name_number["power_budget"]]
area_budget = row2[all_res_column_name_number["area_budget"]]
try:
transformation_selection_mode = row2[all_res_column_name_number["transformation_selection_mode"]]
except:
transformation_selection_mode = ""
workload_latency = latency_budget[:-1].split(';')
latency_budget_refined =""
for workload_latency in workload_latency:
latency_budget_refined +="_" + (workload_latency.split("=")[0][0]+workload_latency.split("=")[1])
return latency_budget_refined[1:]+"_" + power_budget + "_" + area_budget+"_"+transformation_selection_mode
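# Example of the string get_experiments_name builds (budget values are made up):
# with latency_budget "audio=0.001;cava=0.034;", power_budget "0.008" and
# area_budget "0.000017", the call
#   get_experiments_name(file_full_addr, all_res_column_name_number)
# returns "a0.001_c0.034_0.008_0.000017_", i.e. the first letter of each workload
# plus its latency budget, then the power budget, area budget and the
# transformation selection mode (empty here).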
def get_all_col_values_of_a_file(file_full_addr, all_res_column_name_number, column_name):
column_number = all_res_column_name_number[column_name]
all_values = []
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
experiment_name = get_experiments_name(file_full_addr, all_res_column_name_number)
for i, row in enumerate(resultReader):
if i > 1:
if not row[column_number] == '':
value =row[column_number]
values = value.split(";") # if mutiple values
for val in values:
if "=" in val:
val_splitted = val.split("=")
all_values.append(val_splitted[0])
else:
all_values.append(val)
return all_values
def get_all_col_values_of_a_folders(input_dir_names, input_all_res_column_name_number, column_name):
all_values = []
for dir_name in input_dir_names:
file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
all_values.extend(get_all_col_values_of_a_file(file_full_addr, input_all_res_column_name_number, column_name))
# get rid of duplicates
all_values_rid_of_duplicates = list(set(all_values))
return all_values_rid_of_duplicates
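# Usage sketch (the directory names are hypothetical): collect the distinct values
# a column takes across several runs, e.g. every architectural principle that
# appears in any of the result CSVs.
#
#   dirs = ["./run_a", "./run_b"]
#   principles = get_all_col_values_of_a_folders(dirs, all_res_column_name_number,
#                                                "architectural principle")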
def extract_latency_values(values_):
print("")
def plot_codesign_rate_efficacy_cross_workloads_updated(input_dir_names, res_column_name_number):
#itrColNum = all_res_column_name_number["iteration cnt"]
#distColNum = all_res_column_name_number["dist_to_goal_non_cost"]
trueNum = all_res_column_name_number["move validity"]
move_name_number = all_res_column_name_number["move name"]
# experiment_names
file_full_addr_list = []
for dir_name in input_dir_names:
file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
file_full_addr_list.append(file_full_addr)
axis_font = {'fontname': 'Arial', 'size': '4'}
x_column_name = "iteration cnt"
#y_column_name_list = ["high level optimization name", "exact optimization name", "architectural principle", "comm_comp"]
y_column_name_list = ["exact optimization name", "architectural principle", "comm_comp", "workload"]
#y_column_name_list = ["high level optimization name", "exact optimization name", "architectural principle", "comm_comp"]
column_co_design_cnt = {}
column_non_co_design_cnt = {}
column_co_design_rate = {}
column_non_co_design_rate = {}
column_co_design_efficacy_avg = {}
column_non_co_design_efficacy_rate = {}
column_non_co_design_efficacy = {}
column_co_design_dist= {}
column_co_design_dist_avg= {}
column_co_design_improvement = {}
experiment_name_list = []
last_col_val = ""
for file_full_addr in file_full_addr_list:
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
experiment_name_list.append(experiment_name)
column_co_design_dist_avg[experiment_name] = {}
column_co_design_efficacy_avg[experiment_name] = {}
column_co_design_cnt = {}
for y_column_name in y_column_name_list:
y_column_number = res_column_name_number[y_column_name]
x_column_number = res_column_name_number[x_column_name]
dis_to_goal_column_number = res_column_name_number["dist_to_goal_non_cost"]
ref_des_dis_to_goal_column_number = res_column_name_number["ref_des_dist_to_goal_non_cost"]
column_co_design_cnt[y_column_name] = []
column_non_co_design_cnt[y_column_name] = []
column_non_co_design_efficacy[y_column_name] = []
column_co_design_dist[y_column_name] = []
column_co_design_improvement[y_column_name] = []
column_co_design_rate[y_column_name] = []
all_values = get_all_col_values_of_a_folders(input_dir_names, all_res_column_name_number, y_column_name)
last_row_change = ""
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
rows = list(resultReader)
for i, row in enumerate(rows):
if i >= 1:
last_row = rows[i - 1]
if row[y_column_number] not in all_values or row[move_name_number]=="identity":
continue
col_value = row[y_column_number]
col_values = col_value.split(";")
for idx, col_val in enumerate(col_values):
# only for improvement
if float(row[ref_des_dis_to_goal_column_number]) - float(row[dis_to_goal_column_number]) < 0:
continue
delta_x_column = (float(row[x_column_number]) - float(last_row[x_column_number]))/len(col_values)
delta_improvement = (float(last_row[dis_to_goal_column_number]) - float(row[dis_to_goal_column_number]))/(float(last_row[dis_to_goal_column_number])*len(col_values))
if not col_val == last_col_val and i > 1:
if not last_row_change == "":
distance_from_last_change = float(last_row[x_column_number]) - float(last_row_change[x_column_number]) + idx * delta_x_column
column_co_design_dist[y_column_name].append(distance_from_last_change)
improvement_from_last_change = (float(last_row[dis_to_goal_column_number]) - float(row[dis_to_goal_column_number]))/float(last_row[dis_to_goal_column_number]) + idx *delta_improvement
column_co_design_improvement[y_column_name].append(improvement_from_last_change)
last_row_change = copy.deepcopy(last_row)
last_col_val = col_val
# co_des cnt
# we ignore the first element as the first element distance is always zero
co_design_dist_sum = 0
co_design_efficacy_sum = 0
avg_ctr = 1
co_design_dist_selected = column_co_design_dist[y_column_name]
co_design_improvement_selected = column_co_design_improvement[y_column_name]
for idx,el in enumerate(column_co_design_dist[y_column_name]):
if idx == len(co_design_dist_selected) - 1:
break
co_design_dist_sum += (column_co_design_dist[y_column_name][idx] + column_co_design_dist[y_column_name][idx+1])
co_design_efficacy_sum += (column_co_design_improvement[y_column_name][idx] + column_co_design_improvement[y_column_name][idx+1])
#/(column_co_design_dist[y_column_name][idx] + column_co_design_dist[y_column_name][idx+1])
avg_ctr+=1
column_co_design_improvement = {}
column_co_design_dist_avg[experiment_name][y_column_name]= co_design_dist_sum/avg_ctr
column_co_design_efficacy_avg[experiment_name][y_column_name] = co_design_efficacy_sum/avg_ctr
#result = {"rate":{}, "efficacy":{}}
#rate_column_co_design = {}
plt.figure()
plotdata = pd.DataFrame(column_co_design_dist_avg, index=y_column_name_list)
fontSize = 10
plotdata.plot(kind='bar', fontsize=fontSize)
plt.xticks(fontsize=fontSize, rotation=6)
plt.yticks(fontsize=fontSize)
plt.xlabel("co design parameter", fontsize=fontSize)
plt.ylabel("co design distance", fontsize=fontSize)
plt.title("co desgin distance of different parameters", fontsize=fontSize)
# dump in the top folder
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "cross_workloads/co_design_rate")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
plt.savefig(os.path.join(output_dir,"_".join(experiment_name_list) +"_"+"co_design_avg_dist"+'_'.join(y_column_name_list)+".png"))
plt.close('all')
plt.figure()
plotdata = pd.DataFrame(column_co_design_efficacy_avg, index=y_column_name_list)
fontSize = 10
plotdata.plot(kind='bar', fontsize=fontSize)
plt.xticks(fontsize=fontSize, rotation=6)
plt.yticks(fontsize=fontSize)
plt.xlabel("co design parameter", fontsize=fontSize)
plt.ylabel("co design dis", fontsize=fontSize)
plt.title("co desgin efficacy of different parameters", fontsize=fontSize)
# dump in the top folder
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "cross_workloads/co_design_rate")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
plt.savefig(os.path.join(output_dir,"_".join(experiment_name_list) +"_"+"co_design_efficacy"+'_'.join(y_column_name_list)+".png"))
plt.close('all')
def plot_codesign_rate_efficacy_per_workloads(input_dir_names, res_column_name_number):
#itrColNum = all_res_column_name_number["iteration cnt"]
#distColNum = all_res_column_name_number["dist_to_goal_non_cost"]
trueNum = all_res_column_name_number["move validity"]
move_name_number = all_res_column_name_number["move name"]
# experiment_names
file_full_addr_list = []
for dir_name in input_dir_names:
file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
file_full_addr_list.append(file_full_addr)
axis_font = {'fontname': 'Arial', 'size': '4'}
x_column_name = "iteration cnt"
#y_column_name_list = ["high level optimization name", "exact optimization name", "architectural principle", "comm_comp"]
y_column_name_list = ["exact optimization name", "architectural principle", "comm_comp", "workload"]
#y_column_name_list = ["high level optimization name", "exact optimization name", "architectural principle", "comm_comp"]
column_co_design_cnt = {}
column_non_co_design_cnt = {}
column_co_design_rate = {}
column_non_co_design_rate = {}
column_co_design_efficacy_rate = {}
column_non_co_design_efficacy_rate = {}
column_non_co_design_efficacy = {}
column_co_design_efficacy= {}
last_col_val = ""
for file_full_addr in file_full_addr_list:
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
column_co_design_cnt = {}
for y_column_name in y_column_name_list:
y_column_number = res_column_name_number[y_column_name]
x_column_number = res_column_name_number[x_column_name]
dis_to_goal_column_number = res_column_name_number["dist_to_goal_non_cost"]
ref_des_dis_to_goal_column_number = res_column_name_number["ref_des_dist_to_goal_non_cost"]
column_co_design_cnt[y_column_name] = []
column_non_co_design_cnt[y_column_name] = []
column_non_co_design_efficacy[y_column_name] = []
column_co_design_efficacy[y_column_name] = []
all_values = get_all_col_values_of_a_folders(input_dir_names, all_res_column_name_number, y_column_name)
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
rows = list(resultReader)
for i, row in enumerate(rows):
if i >= 1:
last_row = rows[i - 1]
if row[y_column_number] not in all_values or row[trueNum] == "False" or row[move_name_number]=="identity":
continue
col_value = row[y_column_number]
col_values = col_value.split(";")
for idx, col_val in enumerate(col_values):
delta_x_column = (float(row[x_column_number]) - float(last_row[x_column_number]))/len(col_values)
value_to_add_1 = (float(last_row[x_column_number]) + idx * delta_x_column, 1)
value_to_add_0 = (float(last_row[x_column_number]) + idx * delta_x_column, 0)
# only for improvement
if float(row[ref_des_dis_to_goal_column_number]) - float(row[dis_to_goal_column_number]) < 0:
continue
if not col_val == last_col_val:
column_co_design_cnt[y_column_name].append(value_to_add_1)
column_non_co_design_cnt[y_column_name].append(value_to_add_0)
column_co_design_efficacy[y_column_name].append((float(row[ref_des_dis_to_goal_column_number]) - float(row[dis_to_goal_column_number]))/float(row[ref_des_dis_to_goal_column_number]))
column_non_co_design_efficacy[y_column_name].append(0)
else:
column_co_design_cnt[y_column_name].append(value_to_add_0)
column_non_co_design_cnt[y_column_name].append(value_to_add_1)
column_co_design_efficacy[y_column_name].append(0)
column_non_co_design_efficacy[y_column_name].append((float(row[ref_des_dis_to_goal_column_number]) - float(row[dis_to_goal_column_number]))/float(row[ref_des_dis_to_goal_column_number]))
last_col_val = col_val
# co_des cnt
x_values_co_design_cnt = [el[0] for el in column_co_design_cnt[y_column_name]]
y_values_co_design_cnt = [el[1] for el in column_co_design_cnt[y_column_name]]
y_values_co_design_cnt_total =sum(y_values_co_design_cnt)
total_iter = x_values_co_design_cnt[-1]
# non co_des cnt
x_values_non_co_design_cnt = [el[0] for el in column_non_co_design_cnt[y_column_name]]
y_values_non_co_design_cnt = [el[1] for el in column_non_co_design_cnt[y_column_name]]
y_values_non_co_design_cnt_total =sum(y_values_non_co_design_cnt)
column_co_design_rate[y_column_name] = y_values_co_design_cnt_total/total_iter
column_non_co_design_rate[y_column_name] = y_values_non_co_design_cnt_total/total_iter
# co_des efficacy
y_values_co_design_efficacy = column_co_design_efficacy[y_column_name]
y_values_co_design_efficacy_total =sum(y_values_co_design_efficacy)
# non co_des efficacy
y_values_non_co_design_efficacy = column_non_co_design_efficacy[y_column_name]
y_values_non_co_design_efficacy_total =sum(y_values_non_co_design_efficacy)
column_co_design_efficacy_rate[y_column_name] = y_values_co_design_efficacy_total/(y_values_non_co_design_efficacy_total + y_values_co_design_efficacy_total)
column_non_co_design_efficacy_rate[y_column_name] = y_values_non_co_design_efficacy_total/(y_values_non_co_design_efficacy_total + y_values_co_design_efficacy_total)
result = {"rate":{}, "efficacy":{}}
rate_column_co_design = {}
result["rate"] = {"co_design":column_co_design_rate, "non_co_design": column_non_co_design_rate}
result["efficacy_rate"] = {"co_design":column_co_design_efficacy_rate, "non_co_design": column_non_co_design_efficacy_rate}
# prepare for plotting and plot
plt.figure()
plotdata = pd.DataFrame(result["rate"], index=y_column_name_list)
fontSize = 10
plotdata.plot(kind='bar', fontsize=fontSize, stacked=True)
plt.xticks(fontsize=fontSize, rotation=6)
plt.yticks(fontsize=fontSize)
plt.xlabel("co design parameter", fontsize=fontSize)
plt.ylabel("co design rate", fontsize=fontSize)
plt.title("co desgin rate of different parameters", fontsize=fontSize)
# dump in the top folder
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "single_workload/co_design_rate")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
plt.savefig(os.path.join(output_dir,experiment_name +"_"+"co_design_rate_"+'_'.join(y_column_name_list)+".png"))
plt.close('all')
plt.figure()
plotdata = pd.DataFrame(result["efficacy_rate"], index=y_column_name_list)
fontSize = 10
plotdata.plot(kind='bar', fontsize=fontSize, stacked=True)
plt.xticks(fontsize=fontSize, rotation=6)
plt.yticks(fontsize=fontSize)
plt.xlabel("co design parameter", fontsize=fontSize)
plt.ylabel("co design efficacy rate", fontsize=fontSize)
plt.title("co design efficacy rate of different parameters", fontsize=fontSize)
# dump in the top folder
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "single_workload/co_design_rate")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
plt.savefig(os.path.join(output_dir,experiment_name+"_"+"co_design_efficacy_rate_"+'_'.join(y_column_name_list)+".png"))
plt.close('all')
def plot_codesign_progression_per_workloads(input_dir_names, res_column_name_number):
#itrColNum = all_res_column_name_number["iteration cnt"]
#distColNum = all_res_column_name_number["dist_to_goal_non_cost"]
trueNum = all_res_column_name_number["move validity"]
# experiment_names
experiment_names = []
file_full_addr_list = []
for dir_name in input_dir_names:
file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
file_full_addr_list.append(file_full_addr)
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
experiment_names.append(experiment_name)
axis_font = {'size': '20'}
x_column_name = "iteration cnt"
y_column_name_list = ["high level optimization name", "exact optimization name", "architectural principle", "comm_comp"]
experiment_column_value = {}
for file_full_addr in file_full_addr_list:
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
for y_column_name in y_column_name_list:
y_column_number = res_column_name_number[y_column_name]
x_column_number = res_column_name_number[x_column_name]
experiment_column_value[experiment_name] = []
all_values = get_all_col_values_of_a_folders(input_dir_names, all_res_column_name_number, y_column_name)
all_values_encoding = {}
for idx, val in enumerate(all_values):
all_values_encoding[val] = idx
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
rows = list(resultReader)
for i, row in enumerate(rows):
#if row[trueNum] != "True":
# continue
if i >= 1:
if row[y_column_number] not in all_values:
continue
col_value = row[y_column_number]
col_values = col_value.split(";")
for idx, col_val in enumerate(col_values):
last_row = rows[i-1]
delta_x_column = (float(row[x_column_number]) - float(last_row[x_column_number]))/len(col_values)
value_to_add = (float(last_row[x_column_number])+ idx*delta_x_column, col_val)
experiment_column_value[experiment_name].append(value_to_add)
# prepare for plotting and plot
axis_font = {'size': '20'}
fontSize = 20
fig = plt.figure(figsize=(12, 8))
plt.rc('font', **axis_font)
ax = fig.add_subplot(111)
x_values = [el[0] for el in experiment_column_value[experiment_name]]
#y_values = [all_values_encoding[el[1]] for el in experiment_column_value[experiment_name]]
y_values = [el[1] for el in experiment_column_value[experiment_name]]
#ax.set_title("experiment vs system implicaction")
ax.tick_params(axis='both', which='major', labelsize=fontSize, rotation=60)
ax.set_xlabel(x_column_name, fontsize=20)
ax.set_ylabel(y_column_name, fontsize=20)
ax.plot(x_values, y_values, label=y_column_name, linewidth=2)
ax.legend(bbox_to_anchor=(1, 1), loc='upper left', fontsize=fontSize)
# dump in the top folder
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "single_workload/progression")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
plt.tight_layout()
fig.savefig(os.path.join(output_dir,experiment_name+"_progression_"+'_'.join(y_column_name_list)+".png"))
# plt.show()
plt.close('all')
fig = plt.figure(figsize=(12, 8))
plt.rc('font', **axis_font)
ax = fig.add_subplot(111)
x_values = [el[0] for el in experiment_column_value[experiment_name]]
# y_values = [all_values_encoding[el[1]] for el in experiment_column_value[experiment_name]]
y_values = [el[1] for el in experiment_column_value[experiment_name]]
# ax.set_title("experiment vs system implicaction")
ax.tick_params(axis='both', which='major', labelsize=fontSize, rotation=60)
ax.set_xlabel(x_column_name, fontsize=20)
ax.set_ylabel(y_column_name, fontsize=20)
ax.plot(x_values, y_values, label=y_column_name, linewidth=2)
ax.legend(bbox_to_anchor=(1, 1), loc='upper left', fontsize=fontSize)
# dump in the top folder
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "single_workload/progression")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
plt.tight_layout()
fig.savefig(os.path.join(output_dir, experiment_name + "_progression_" + y_column_name + ".png"))
# plt.show()
plt.close('all')
def plot_3d(input_dir_names, res_column_name_number):
# experiment_names
experiment_names = []
file_full_addr_list = []
for dir_name in input_dir_names:
file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1.csv")
file_full_addr_list.append(file_full_addr)
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
experiment_names.append(experiment_name)
axis_font = {'size': '10'}
fontSize = 10
column_value = {}
# initialize the dictionary
column_name_list = ["budget_scaling_power", "budget_scaling_area","budget_scaling_latency"]
under_study_vars =["iteration cnt",
"local_bus_avg_theoretical_bandwidth", "local_bus_max_actual_bandwidth",
"local_bus_avg_actual_bandwidth",
"system_bus_avg_theoretical_bandwidth", "system_bus_max_actual_bandwidth",
"system_bus_avg_actual_bandwidth", "global_total_traffic", "local_total_traffic",
"global_memory_total_area", "local_memory_total_area", "ips_total_area",
"gpps_total_area","ip_cnt", "max_accel_parallelism", "avg_accel_parallelism",
"gpp_cnt", "max_gpp_parallelism", "avg_gpp_parallelism"]
# get all the data
for file_full_addr in file_full_addr_list:
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
experiment_name = get_experiments_name( file_full_addr, res_column_name_number)
for i, row in enumerate(resultReader):
#if row[trueNum] != "True":
# continue
if i >= 1:
for column_name in column_name_list + under_study_vars:
if column_name not in column_value.keys() :
column_value[column_name] = []
column_number = res_column_name_number[column_name]
col_value = row[column_number]
col_values = col_value.split(";")
if "=" in col_values[0]:
column_value[column_name].append(float((col_values[0]).split("=")[1]))
else:
column_value[column_name].append(float(col_values[0]))
for idx,under_study_var in enumerate(under_study_vars):
fig_budget_blkcnt = plt.figure(figsize=(12, 12))
plt.rc('font', **axis_font)
ax_blkcnt = fig_budget_blkcnt.add_subplot(projection='3d')
img = ax_blkcnt.scatter3D(column_value["budget_scaling_power"], column_value["budget_scaling_area"], column_value["budget_scaling_latency"],
c=column_value[under_study_var], cmap="bwr", s=80, label="System Block Count")
for idx,_ in enumerate(column_value[under_study_var]):
coordinate = column_value[under_study_var][idx]
coord_in_scientific_notatio = "{:.2e}".format(coordinate)
ax_blkcnt.text(column_value["budget_scaling_power"][idx], column_value["budget_scaling_area"][idx], column_value["budget_scaling_latency"][idx], '%s' % coord_in_scientific_notatio, size=fontSize)
ax_blkcnt.set_xlabel("Power Budget", fontsize=fontSize)
ax_blkcnt.set_ylabel("Area Budget", fontsize=fontSize)
ax_blkcnt.set_zlabel("Latency Budget", fontsize=fontSize)
ax_blkcnt.legend()
cbar = fig_budget_blkcnt.colorbar(img, aspect=40)
cbar.set_label("System Block Count", rotation=270)
#plt.title("{Power Budget, Area Budget, Latency Budget} VS System Block Count: " + subDirName)
plt.tight_layout()
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "3D/case_studies")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
plt.savefig(os.path.join(output_dir, under_study_var+ ".png"))
# plt.show()
plt.close('all')
def plot_convergence_per_workloads(input_dir_names, res_column_name_number):
#itrColNum = all_res_column_name_number["iteration cnt"]
#distColNum = all_res_column_name_number["dist_to_goal_non_cost"]
trueNum = all_res_column_name_number["move validity"]
move_name_number = all_res_column_name_number["move name"]
# experiment_names
experiment_names = []
file_full_addr_list = []
for dir_name in input_dir_names:
file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
file_full_addr_list.append(file_full_addr)
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
experiment_names.append(experiment_name)
color_values = ["r","b","y","black","brown","purple"]
column_name_color_val_dict = {"best_des_so_far_power":"purple", "power_budget":"purple","best_des_so_far_area_non_dram":"blue", "area_budget":"blue",
"latency_budget_hpvm_cava":"orange", "latency_budget_audio_decoder":"yellow", "latency_budget_edge_detection":"red",
"best_des_so_far_latency_hpvm_cava":"orange", "best_des_so_far_latency_audio_decoder": "yellow","best_des_so_far_latency_edge_detection": "red",
"latency_budget":"white"
}
axis_font = {'size': '20'}
fontSize = 20
x_column_name = "iteration cnt"
y_column_name_list = ["power", "area_non_dram", "latency", "latency_budget", "power_budget","area_budget"]
experiment_column_value = {}
for file_full_addr in file_full_addr_list:
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
experiment_column_value[experiment_name] = {}
for y_column_name in y_column_name_list:
if "budget" in y_column_name:
prefix = ""
else:
prefix = "best_des_so_far_"
y_column_name = prefix+y_column_name
y_column_number = res_column_name_number[y_column_name]
x_column_number = res_column_name_number[x_column_name]
#dis_to_goal_column_number = res_column_name_number["dist_to_goal_non_cost"]
#ref_des_dis_to_goal_column_number = res_column_name_number["ref_des_dist_to_goal_non_cost"]
if not y_column_name == prefix+"latency":
experiment_column_value[experiment_name][y_column_name] = []
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
for i, row in enumerate(resultReader):
if i > 1:
if row[trueNum] == "FALSE" or row[move_name_number]=="identity":
continue
col_value = row[y_column_number]
if ";" in col_value:
col_value = col_value[:-1]
col_values = col_value.split(";")
for col_val in col_values:
if "=" in col_val:
val_splitted = col_val.split("=")
value_to_add = (float(row[x_column_number]), (val_splitted[0], val_splitted[1]))
else:
value_to_add = (float(row[x_column_number]), col_val)
if y_column_name in [prefix+"latency", prefix+"latency_budget"] :
new_tuple = (value_to_add[0], 1000*float(value_to_add[1][1]))
if y_column_name+"_"+value_to_add[1][0] not in experiment_column_value[experiment_name].keys():
experiment_column_value[experiment_name][y_column_name + "_" + value_to_add[1][0]] = []
experiment_column_value[experiment_name][y_column_name+"_"+value_to_add[1][0]].append(new_tuple)
if y_column_name in [prefix+"power", prefix+"power_budget"]:
new_tuple = (value_to_add[0], float(value_to_add[1])*1000)
experiment_column_value[experiment_name][y_column_name].append(new_tuple)
elif y_column_name in [prefix+"area_non_dram", prefix+"area_budget"]:
new_tuple = (value_to_add[0], float(value_to_add[1]) * 1000000)
experiment_column_value[experiment_name][y_column_name].append(new_tuple)
# prepare for plotting and plot
fig = plt.figure(figsize=(15, 8))
ax = fig.add_subplot(111)
for column, values in experiment_column_value[experiment_name].items():
x_values = [el[0] for el in values]
y_values = [el[1] for el in values]
ax.set_yscale('log')
if "budget" in column:
marker = 'x'
alpha_ = .3
else:
marker = "_"
alpha_ = 1
ax.plot(x_values, y_values, label=column, c=column_name_color_val_dict[column], marker=marker, alpha=alpha_)
#ax.set_title("experiment vs system implicaction")
ax.set_xlabel(x_column_name, fontsize=fontSize)
y_axis_name = "_".join(list(experiment_column_value[experiment_name].keys()))
ax.set_ylabel(y_axis_name, fontsize=fontSize)
ax.legend(bbox_to_anchor=(1, 1), loc='upper left', fontsize=fontSize)
plt.tight_layout()
# dump in the top folder
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "single_workload/convergence")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fig.savefig(os.path.join(output_dir,experiment_name+"_convergence.png"))
# plt.show()
plt.close('all')
def plot_convergence_vs_time(input_dir_names, res_column_name_number):
PA_time_scaling_factor = 10
#itrColNum = all_res_column_name_number["iteration cnt"]
#distColNum = all_res_column_name_number["dist_to_goal_non_cost"]
trueNum = all_res_column_name_number["move validity"]
# experiment_names
experiment_names = []
file_full_addr_list = []
for dir_name in input_dir_names:
file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
file_full_addr_list.append(file_full_addr)
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
experiment_names.append(experiment_name)
axis_font = {'size': '15'}
fontSize = 20
x_column_name = "exploration_plus_simulation_time"
y_column_name_list = ["best_des_so_far_dist_to_goal_non_cost"]
PA_column_experiment_value = {}
FARSI_column_experiment_value = {}
#column_name = "move name"
for k, file_full_addr in enumerate(file_full_addr_list):
for y_column_name in y_column_name_list:
# get all possible the values of interest
y_column_number = res_column_name_number[y_column_name]
x_column_number = res_column_name_number[x_column_name]
PA_column_experiment_value[y_column_name] = []
FARSI_column_experiment_value[y_column_name] = []
PA_last_time = 0
FARSI_last_time = 0
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
experiment_name = get_experiments_name( file_full_addr, res_column_name_number)
for i, row in enumerate(resultReader):
#if row[trueNum] != "True":
# continue
if i >= 1:
FARSI_last_time += float(row[x_column_number])
FARSI_value_to_add = (float(FARSI_last_time), row[y_column_number])
FARSI_column_experiment_value[y_column_name].append(FARSI_value_to_add)
PA_last_time = FARSI_last_time*PA_time_scaling_factor
PA_value_to_add = (float(PA_last_time), row[y_column_number])
PA_column_experiment_value[y_column_name].append(PA_value_to_add)
# prepare for plotting and plot
fig = plt.figure(figsize=(12, 12))
plt.rc('font', **axis_font)
ax = fig.add_subplot(111)
fontSize = 20
#plt.tight_layout()
x_values = [el[0] for el in FARSI_column_experiment_value[y_column_name]]
y_values = [str(float(el[1]) * 100 // 1 / 100.0) for el in FARSI_column_experiment_value[y_column_name]]
x_values.reverse()
y_values.reverse()
ax.scatter(x_values, y_values, label="FARSI time to completion", marker="_")
# ax.set_yscale('log')
x_values = [el[0] for el in PA_column_experiment_value[y_column_name]]
y_values = [str(float(el[1]) * 100 // 1 / 100.0) for el in PA_column_experiment_value[y_column_name]]
x_values.reverse()
y_values.reverse()
ax.scatter(x_values, y_values, label="PA time to completion", marker="_")
#ax.set_xscale('log')
#ax.set_title("experiment vs system implicaction")
ax.legend(loc="upper right")#bbox_to_anchor=(1, 1), loc="upper left")
ax.set_xlabel(x_column_name, fontsize=fontSize)
ax.set_ylabel(y_column_name, fontsize=fontSize)
plt.tight_layout()
# dump in the top folder
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "single_workload/progression")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
fig.savefig(os.path.join(output_dir,str(k)+"_" + y_column_name+"_vs_"+x_column_name+"_FARSI_vs_PA.png"))
#plt.show()
plt.close('all')
def plot_convergence_cross_workloads(input_dir_names, res_column_name_number):
#itrColNum = all_res_column_name_number["iteration cnt"]
#distColNum = all_res_column_name_number["dist_to_goal_non_cost"]
trueNum = all_res_column_name_number["move validity"]
# experiment_names
experiment_names = []
file_full_addr_list = []
for dir_name in input_dir_names:
file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1_all_reults.csv")
file_full_addr_list.append(file_full_addr)
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
experiment_names.append(experiment_name)
axis_font = {'size': '20'}
x_column_name = "iteration cnt"
y_column_name_list = ["best_des_so_far_dist_to_goal_non_cost", "dist_to_goal_non_cost"]
column_experiment_value = {}
#column_name = "move name"
for y_column_name in y_column_name_list:
# get all possible the values of interest
y_column_number = res_column_name_number[y_column_name]
x_column_number = res_column_name_number[x_column_name]
column_experiment_value[y_column_name] = {}
# initialize the dictionary
# get all the data
for file_full_addr in file_full_addr_list:
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
experiment_name = get_experiments_name( file_full_addr, res_column_name_number)
column_experiment_value[y_column_name][experiment_name] = []
for i, row in enumerate(resultReader):
#if row[trueNum] != "True":
# continue
if i >= 1:
value_to_add = (float(row[x_column_number]), max(float(row[y_column_number]),.01))
column_experiment_value[y_column_name][experiment_name].append(value_to_add)
# prepare for plotting and plot
fig = plt.figure()
ax = fig.add_subplot(111)
#plt.tight_layout()
for experiment_name, values in column_experiment_value[y_column_name].items():
x_values = [el[0] for el in values]
y_values = [el[1] for el in values]
ax.scatter(x_values, y_values, label=experiment_name[1:])
#ax.set_title("experiment vs system implicaction")
ax.set_yscale('log')
ax.legend(bbox_to_anchor=(1, 1), loc="best")
ax.set_xlabel(x_column_name)
ax.set_ylabel(y_column_name)
plt.tight_layout()
# dump in the top folder
output_base_dir = '/'.join(input_dir_names[0].split("/")[:-2])
output_dir = os.path.join(output_base_dir, "cross_workloads/convergence")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
fig.savefig(os.path.join(output_dir,x_column_name+"_"+y_column_name+".png"))
# plt.show()
plt.close('all')
def plot_system_implication_analysis(input_dir_names, res_column_name_number, case_study):
# experiment_names
experiment_names = []
file_full_addr_list = []
for dir_name in input_dir_names:
file_full_addr = os.path.join(dir_name, "result_summary/FARSI_simple_run_0_1.csv")
file_full_addr_list.append(file_full_addr)
experiment_name = get_experiments_name(file_full_addr, res_column_name_number)
experiment_names.append(experiment_name)
axis_font = {'size': '10'}
column_name_list = list(case_study.values())[0]
column_experiment_value = {}
#column_name = "move name"
for column_name in column_name_list:
# get all possible the values of interest
column_number = res_column_name_number[column_name]
column_experiment_value[column_name] = {}
# initialize the dictionary
column_experiment_number_dict = {}
experiment_number_dict = {}
# get all the data
for file_full_addr in file_full_addr_list:
with open(file_full_addr, newline='') as csvfile:
resultReader = csv.reader(csvfile, delimiter=',', quotechar='|')
experiment_name = get_experiments_name( file_full_addr, res_column_name_number)
for i, row in enumerate(resultReader):
#if row[trueNum] != "True":
# continue
if i >= 1:
col_value = row[column_number]
col_values = col_value.split(";")
for col_val in col_values:
column_experiment_value[column_name][experiment_name] = float(col_val)
# prepare for plotting and plot
# plt.figure()
index = experiment_names
plotdata = | pd.DataFrame(column_experiment_value, index=index) | pandas.DataFrame |
import pandas as pd
import pickle
import os
import argparse
import sys
if './' not in sys.path:
sys.path.append('./')
from src.train import FrankModelTrainer, Trainer
from src.dataset import get_datasets
from src.utils.eval_values import frank_m2_similarity
from src.comparators.activation_comparator import ActivationComparator
def parse_args(args):
parser = argparse.ArgumentParser(description='Simple settings.')
parser.add_argument('i', type=int, help='Index of tiny pairs from 0 to 9')
parser.add_argument('-c','--csv', default="results/official/tiny_bn/summary.csv")
parser.add_argument('-o','--out-dir', default='results/collection/cka_per_iter_lr')
parser.add_argument('-cr','--cka-reg', type=float, default=0)
parser.add_argument('-n', '--n-iter', type=int, default=500)
parser.add_argument('-l', '--layer', default='bn3')
parser.add_argument('--init', default='ps_inv')
return parser.parse_args(args)
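# Usage sketch: parse_args takes an argv-style list, so the script can be driven
# from the command line or programmatically (the values below are made up).
#
#   args = parse_args(['3', '--layer', 'bn2', '--init', 'ps_inv', '-n', '200'])
#   print(args.i, args.layer, args.cka_reg)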
def get_df(csv, layer, init='ps_inv'):
df = | pd.read_csv(csv) | pandas.read_csv |
#! /usr/bin/env python3
import os
import sys
import requests
from pandas import DataFrame
from msmarco import load_msmarco_queries, load_msmarco_qrels, extract_querie_relevance
def create_request_specific_ids(query, rankprofile, doc_ids):
body = {
"yql": "select id, rankfeatures from sources * where (userInput(@userQuery))",
"userQuery": query,
"hits": len(doc_ids),
"recall": "+(" + " ".join(["id:" + str(doc) for doc in doc_ids]) + ")",
"timeout": "15s",
"presentation.format": "json",
"ranking": {"profile": rankprofile, "listFeatures": "true"},
}
return body
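# Example of the body this builds (a sketch -- the query, rank profile and doc ids
# are made up). The "recall" clause restricts retrieval to the listed documents so
# rank features are computed only for them.
#
#   body = create_request_specific_ids("what is a cat", "collect_rank_features",
#                                      ["D123", "D456"])
#   # body["recall"] == "+(id:D123 id:D456)"
#   # body["hits"] == 2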
def create_request_top_hits(query, rankprofile, hits):
body = {
"yql": "select id, rankfeatures from sources * where (userInput(@userQuery))",
"userQuery": query,
"hits": hits,
"timeout": "15s",
"presentation.format": "json",
"ranking": {"profile": rankprofile, "listFeatures": "true"},
}
return body
def get_features(url, body):
r = requests.post(url, json=body)
if r.status_code != requests.codes.ok:
print("Bad response code for request: " + str(body))
return {}
result = r.json()
hits = []
if "children" in result["root"]:
hits = result["root"]["children"]
return hits
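# Sketch of a call against a local Vespa endpoint (the URL and rank profile are
# hypothetical); each returned hit carries hit["fields"]["id"] and
# hit["fields"]["rankfeatures"], which annotate_data below relies on.
#
#   hits = get_features("http://localhost:8080/search/",
#                       create_request_top_hits("what is a cat",
#                                               "collect_rank_features", 10))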
def annotate_data(hits, query_id, relevant_id):
data = []
for h in hits:
rankfeatures = h["fields"]["rankfeatures"]
rankfeatures.update({"docid": h["fields"]["id"]})
rankfeatures.update({"qid": query_id})
rankfeatures.update({"relevant": 1 if h["fields"]["id"] == relevant_id else 0})
data.append(rankfeatures)
return data
def load_processed_queries(file_path):
try:
f_processed = open(file_path)
processed_queries = [int(x) for x in f_processed.readlines()]
f_processed.close()
except FileNotFoundError:
processed_queries = []
return processed_queries
def build_dataset(url, query_relevance, rank_profile, number_random_sample):
processed_queries = load_processed_queries(file_path=PROCESSED_QUERIES_FILE)
number_queries = len(query_relevance) - len(processed_queries)
line = 0
for qid, (query, relevant_id) in query_relevance.items():
if int(qid) not in processed_queries:
line += 1
print("{0}/{1}".format(line, number_queries))
relevant_id_request = create_request_specific_ids(
query=query, rankprofile=rank_profile, doc_ids=[relevant_id]
)
hits = get_features(url=url, body=relevant_id_request)
if len(hits) == 1 and hits[0]["fields"]["id"] == relevant_id:
random_hits_request = create_request_top_hits(
query=query, rankprofile=rank_profile, hits=number_random_sample
)
hits.extend(get_features(url=url, body=random_hits_request))
features = annotate_data(
hits=hits, query_id=qid, relevant_id=relevant_id
)
                if os.path.isfile(OUTPUT_FILE):
                    # (assumed) append to the existing dataset file without
                    # repeating the header
                    DataFrame.from_records(features).to_csv(
                        OUTPUT_FILE, mode="a", header=False, index=False)
                else:
                    DataFrame.from_records(features).to_csv(
                        OUTPUT_FILE, index=False)
import pandas as pd
import numpy as np
import yfinance as yf
from pandas import Series
from prettytable import PrettyTable
from Common.Readers.Engine.AbstractEngine import AbstractEngine
from Common.StockType.AbstractStock import AbstractStock
from Common.StockType.Equities.AbstractStockEquity import AbstractStockEquity
from Common.StockType.Funds.ExchangeTraded.ExchangeTradedFund import ExchangeTradedFund
from Common.StockType.Funds.Index.IndexFund import IndexFund
from Common.StockType.Funds.Mutual.MutualFund import MutualFund
from Common.StockType.Currencies.Crypto.CryptoCurrency import CryptoCurrency
from Common.StockType.Currencies.Regular.RegularCurrency import RegularCurrency
from Common.StockType.Futures.AbstractStockFuture import AbstractStockFuture
class YahooFinanceEngine(AbstractEngine):
__pretty_table: PrettyTable = PrettyTable()
_info_labels: list = list()
_info_list: list = list()
__ticker: str = 'NA'
_stock_type: AbstractStock
_url: str = 'NA'
_url_logo: str = 'NA'
_address1: str = 'NA'
_address2: str = 'NA'
_city: str = 'NA'
_company_name: str = 'NA'
_country: str = 'NA'
_currency: str = 'NA'
_exchange: str = 'NA'
_fax: str = 'NA'
_state: str = 'NA'
_phone: str = 'NA'
_postal_code: str = 'NA'
_market: str = 'NA'
_market_cap: str = 'NA'
_quote_type: str = 'NA'
_beta: float = -1.1
_high52: float = -1.1
_low52: float = -1.1
_high_today: float = -1.1
_low_today: float = -1.1
_avg50: float = -1.1
_avg200: float = -1.1
_ratio_payout: float = -1.1
_ratio_peg: float = -1.1
_ratio_short: float = -1.1
_pe_forward: float = -1.1
_pe_trailing: float = -1.1
_book_value: float = -1.1
_book_price_to: float = -1.1
_ent_value: int = -1
_ent2revenue: float = -1.1
_ent2ebitda: float = -1.1
_div_rate: float = -1.1
_div_5y_avg_yield: float = -1.1
_div_yield: float = -1.1
_div_last_value: float = -1.1
_div_last_date: int = -1
_div_ex_date: int = -1
_split_date: int = -1
_fiscal_year_end_last: int = -1
_fiscal_year_end_next: int = -1
_last_quarter: int = -1
InfoDic: dict # = dict()
ActionsDf: pd.DataFrame = pd.DataFrame()
Balance_SheetDf: pd.DataFrame = pd.DataFrame()
BalanceSheetDf: pd.DataFrame = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
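    # Hedged sketch of how a concrete test class is expected to plug in a parser
    # engine (the class name and keywords below are illustrative, not taken from
    # this file):
    #
    #   class TestCParser(ParserTests, tm.TestCase):
    #       def read_csv(self, *args, **kwds):
    #           kwds = kwds.copy()
    #           kwds['engine'] = 'c'
    #           return read_csv(*args, **kwds)
    #
    #       def read_table(self, *args, **kwds):
    #           kwds = kwds.copy()
    #           kwds['engine'] = 'c'
    #           return read_table(*args, **kwds)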
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
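    # Hedged usage sketch (file name and sizes are made up): write a frame to CSV,
    # read it back in chunks on a thread pool and compare against the original.
    #
    #   df = self.construct_dataframe(10 ** 4)
    #   with tm.ensure_clean('__thread_pool__.csv') as path:
    #       df.to_csv(path)
    #       result = self.generate_multithread_dataframe(path, 10 ** 4, 4)
    #       tm.assert_frame_equal(df, result)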
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
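# conv.parse_date_time combines the integer-like date column (19990127)
# with the time strings into full datetimes for both composed columns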
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
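# f(i, v) builds one CSV row with marker v in column i and the remaining
# nv - 1 fields left empty, so every default NA value is exercised once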
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
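# rows 3-5 become NaN in the float column and NaT in the datetime column;
# the round-trip through CSV should preserve both as missing values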
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
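# the same frame should round-trip from the common CSV layouts below; the
# "malformed" variants check how missing index-column headers are handled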
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 drops the first four raw lines (the comments and 'X,Y,Z');
# header=1 then takes the second remaining line ('A,B,C') as the header,
# so only the final two data rows are parsed
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
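# write the same content as utf-16/utf-16le/utf-16be (with a BOM where
# applicable) and check the result matches a plain utf-8 read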
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
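# convert_days maps '14+' -> 15 and blanks -> NaN (convert_days_sentinel is
# an identical copy); convert_score averages a 'lo-hi' range, e.g. '2-5' -> 3.5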
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
    def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
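        # The ALL-CAPS names in the comments below (WHITESPACE_LINE, EAT_COMMENT,
        # IN_QUOTED_FIELD, ...) refer to internal states of the C tokenizer; each
        # snippet is constructed so that end-of-file is reached while in that state.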
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
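    # Runs the shared ParserTests suite through engine='python' (see the
    # read_csv/read_table overrides below), plus python-engine-only behaviour
    # such as delimiter sniffing (sep=None) and regex separators.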
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
        # stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
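    # Each test computes an expected frame with explicit colspecs and checks that
    # read_fwf infers the same column boundaries when colspecs is omitted.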
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
        # File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
class TestCParserHighMemory(CParserTests, tm.TestCase):
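    # C engine with low_memory=False, so the whole input is tokenized in a single
    # internal chunk rather than incrementally.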
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
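    # C engine with low_memory=True and buffer_lines=2 (presumably the number of
    # rows per internal buffer), which forces the chunked low-memory code path to
    # be exercised even on tiny inputs.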
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
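        # float_precision='high' selects the C parser's more precise
        # string-to-double routine, while 'round_trip' defers to Python's own
        # float parsing and should therefore match float() exactly, as asserted
        # below.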
normal_errors = []
precise_errors = []
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_pass_dtype_as_recarray(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
        result = self.read_csv(StringIO(data))
"""
Tests for CBMonthEnd, CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
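    # Mixin shared by the CBMonthEnd and CBMonthBegin test classes below; each
    # subclass sets ``_offset`` and inherits the equality, hashing, pickling and
    # copy checks defined here.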
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
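    # Each entry in apply_cases pairs an offset with a mapping of
    # input datetime -> expected datetime after adding that offset.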
apply_cases: _ApplyCases = [
(
CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
(
2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31),
},
),
(
-CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31),
},
),
(
-2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31),
},
),
(
CBMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
0
] == datetime(2012, 1, 31)
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthBegin>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
apply_cases: _ApplyCases = [
(
CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3),
},
),
(
2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1),
},
),
(
-CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1),
},
),
(
-2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1),
},
),
(
CBMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
0
] == datetime(2012, 1, 3)
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthEnd() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SM")
exp = DatetimeIndex(dates, freq="SM")
tm.assert_index_equal(result, exp)
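    # Each appended offset_cases entry pairs a SemiMonthEnd variant with a
    # mapping of input datetime -> expected datetime after adding that offset.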
offset_cases = []
offset_cases.append(
(
SemiMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16),
},
)
)
offset_cases.append(
(
SemiMonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1, day_of_month=4),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-2),
{
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize("case", offset_cases)
def test_apply_index(self, case):
# https://github.com/pandas-dev/pandas/issues/34580
offset, cases = case
s = DatetimeIndex(cases.keys())
exp = DatetimeIndex(cases.values())
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = offset + s
tm.assert_index_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = offset.apply_index(s)
tm.assert_index_equal(result, exp)
on_offset_cases = [
(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
dt, expected = case
assert_is_on_offset(SemiMonthEnd(), dt, expected)
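    def test_roll_forward_after_anchor_example(self):
        # Hedged illustrative case, not from the original suite: with the default
        # day_of_month=15 anchor, a date past the 15th rolls forward to month end.
        assert datetime(2008, 1, 16) + SemiMonthEnd() == datetime(2008, 1, 31)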
@pytest.mark.parametrize("klass", [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = klass(
[
Timestamp("2000-01-01 00:15:00", tz="US/Central"),
Timestamp("2000-02-01", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthBegin() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SMS")
exp = DatetimeIndex(dates, freq="SMS")
tm.assert_index_equal(result, exp)
offset_cases = [
(
SemiMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
),
(
SemiMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 2): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 5): datetime(2007, 1, 16),
datetime(2007, 1, 1): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 15): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 12, 1),
},
),
(
            SemiMonthBegin(-1),
#dd-cell
import pandas as pd
df_customers = inputs['Customer']
customers = df_customers['CustomerID'].values
df_customers.set_index('CustomerID', inplace=True)
df_offers = inputs['Offer']
offers = df_offers['OfferID'].values
df_offers.set_index('OfferID', inplace=True)
df_channels = inputs['Channel']
channels = df_channels['ChannelID'].values
df_channels.set_index('ChannelID', inplace=True)
df_candidates = inputs['Candidate']
candidates = []
for index, row in df_candidates.iterrows():
candidate = {}
candidates.append( (row['CustomerID'], row['OfferID'],row['ChannelID']) )
df_candidates.set_index(["CustomerID", "OfferID","ChannelID"], inplace = True)
df_parameters = inputs['parameters']
parameters = df_parameters['name'].values
df_parameters.set_index('name', inplace=True)
df_channels.head()
#dd-cell
from docplex.mp.model import Model
mdl = Model("RetailPlanning")
selected = mdl.binary_var_dict(candidates, name="selected")
df_selected = pd.DataFrame({'selected': selected})
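#dd-cell
# Hedged sketch, not from the original notebook: one plausible way to finish the model.
# The "at most one offer per customer" rule and the simple count objective are
# illustrative assumptions only.
for c in customers:
    mdl.add_constraint(mdl.sum(selected[t] for t in candidates if t[0] == c) <= 1,
                       ctname="one_offer_%s" % c)
mdl.maximize(mdl.sum(selected[t] for t in candidates))
solution = mdl.solve()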
""" Figure 2
1. Load data
2. Run statistical tests
3. Prepare figure
4. Plot learning-rate simulations
5. Plot parameter estimates
6. Add subplot labels and save figure
"""
import numpy as np
import pandas as pd
import matplotlib
import seaborn as sns
import os
from al_utilities import get_stats
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from AlAgentVars import AgentVars
from AlAgent import AlAgent
from al_plot_utils import cm2inch, label_subplots, latex_plt, swarm_boxplot
# Update matplotlib to use Latex and to change some defaults
os.environ["PATH"] += os.pathsep + '/usr/local/texlive/2016/bin/x86_64-darwin'
matplotlib = latex_plt(matplotlib)
# Get home directory
paths = os.getcwd()
path = paths.split(os.path.sep)
home_dir = path[1]
# ------------
# 1. Load data
# ------------
model_results = pd.read_pickle('al_data/estimates_first_exp_25_sp.pkl')
# -------------------------
# 2. Run statistical tests
# -------------------------
# Print out statistics for paper
print('\n\nMotor-noise parameter\n')
median_omikron_0, q1_omikron_0, q3_omikron_0, p_omikron_0, stat_omikron_0 = get_stats(model_results, 1, 'omikron_0')
print('\n\nLearning-rate-noise parameter\n')
median_omikron_1, q1_omikron_1, q3_omikron_1, p_omikron_1, stat_omikron_1 = get_stats(model_results, 1, 'omikron_1')
print('\n\nb_0 parameter\n')
median_b_0, q1_b_0, q3_b_0, p_b_0, stat_b_0 = get_stats(model_results, 1, 'b_0')
print('\n\nb_1 parameter\n')
median_b_1, q1_b_1, q3_b_1, p_b_1, stat_b_1 = get_stats(model_results, 1, 'b_1')
print('\n\nUncertainty-underestimation parameter\n')
median_u, q1_u, q3_u, p_u, stat_u = get_stats(model_results, 1, 'u')
print('\n\nSurprise-sensitivity parameter\n')
median_s, q1_s, q3_s, p_s, stat_s = get_stats(model_results, 1, 's')
print('\n\nHazard-rate parameter\n')
median_h, q1_h, q3_h, p_h, stat_h = get_stats(model_results, 1, 'h')
print('\n\nCatch-trial parameter\n')
median_sigma_H, q1_sigma_H, q3_sigma_H, p_sigma_H, stat_sigma_H = get_stats(model_results, 1, 'sigma_H')
print('\n\nReward-bias parameter\n')
median_q, q1_q, q3_q, p_q, stat_q = get_stats(model_results, 1, 'q')
# Create data frames to save statistics for Latex manuscript
fig_2_desc = pd.DataFrame()
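# Hedged sketch, not from the original script: one way the statistics computed above
# could be collected and written out for the Latex manuscript. The column layout and
# output path are assumptions.
fig_2_desc['omikron_0'] = [median_omikron_0, q1_omikron_0, q3_omikron_0, p_omikron_0]
fig_2_desc['u'] = [median_u, q1_u, q3_u, p_u]
fig_2_desc.index = ['median', 'q1', 'q3', 'p']
fig_2_desc.to_csv('al_data/fig_2_desc.csv')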
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import polling2
import requests
import json
from web3 import Web3
import pandas as pd
from decouple import config
from datetime import datetime
import logging
from collections import defaultdict
import time
from sqlalchemy import create_engine, desc, inspect
from sqlalchemy.orm import sessionmaker
from models import EdenBlock, Epoch, Base, Distribution, DistributionBalance
from apscheduler.schedulers.background import BackgroundScheduler
INFURA_ENDPOINT = config('INFURA_ENDPOINT')
PSQL_ENDPOINT = config('PSQL_ENDPOINT')
engine = create_engine(PSQL_ENDPOINT)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query_dict = {
'block': 'block.graphql',
'distribution': 'distribution.graphql',
'block_lookup': 'block_lookup.graphql',
'epoch_latest': 'epoch_latest.graphql',
'epoch': 'epoch.graphql'
}
eden_governance_api = 'https://api.thegraph.com/subgraphs/name/eden-network/governance'
eden_distribution_api = 'https://api.thegraph.com/subgraphs/name/eden-network/distribution'
eden_network_api = 'https://api.thegraph.com/subgraphs/name/eden-network/network'
def query_to_dict(rset):
result = defaultdict(list)
for obj in rset:
instance = inspect(obj)
for key, x in instance.attrs.items():
result[key].append(x.value)
return result
def get_web3_provider():
infura_endpoint = INFURA_ENDPOINT
my_provider = Web3.HTTPProvider(infura_endpoint)
w3 = Web3(my_provider)
return w3
def get_latest_eth_block():
eden_db_last_block = get_latest_eden_block_db()
w3 = get_web3_provider()
latest_eth_block = w3.eth.get_block('latest')['number']
if latest_eth_block > eden_db_last_block:
return latest_eth_block
else:
return None
def get_latest_eden_block_db():
eden_db_last_block = session.query(EdenBlock).order_by(desc(EdenBlock.block_number)).limit(1).all()
if eden_db_last_block != []:
eden_db_last_block = eden_db_last_block[0].block_number
else:
eden_db_last_block = 0
return eden_db_last_block
def clean_epoch_entry(epoch_string):
epoch_number = int(epoch_string.split('+')[1].replace('epoch', ''))
return int(epoch_number)
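def _clean_epoch_entry_example():
    """Hedged illustration, not in the original module: subgraph epoch ids are assumed to
    look like '<hash>+epoch<number>', so the helper returns the trailing number."""
    assert clean_epoch_entry('0xabc123+epoch42') == 42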
def get_latest_distribution_number():
eden_db_last_number_query = session.query(Distribution).order_by(desc(Distribution.distribution_number)).limit(1).all()
if eden_db_last_number_query != []:
eden_last_number = eden_db_last_number_query[0].distribution_number
return eden_last_number
else:
return 0
def ipfs_link_cleanup(raw_uri):
final_ipfs_link = "https://ipfs.io/ipfs/" + raw_uri.split('//')[1]
return final_ipfs_link
def graph_query_call(api, query, variables=None):
request = requests.post(api, json={'query': query, 'variables': variables})
if request.status_code == 200:
return request.json()
else:
        raise Exception('Query failed. Return code is {}. {}'.format(request.status_code, query))
def fetch_query(query):
query_file = query_dict.get(query)
with open(query_file, 'r') as file:
data = file.read()
return data
def get_epoch_number(block_number):
epoch_number_query = session.query(Epoch).filter(block_number >= Epoch.start_block_number, block_number <= Epoch.end_block_number).limit(1).all()
if epoch_number_query != []:
epoch_number = epoch_number_query[0].epoch_number
return epoch_number
else:
latest_epoch = get_latest_epoch()
return latest_epoch
def get_latest_epoch():
query = fetch_query('epoch_latest')
latest_epoch_result = graph_query_call(eden_governance_api, query)
latest_epoch_id = latest_epoch_result['data']['epoches'][0]['id']
latest_epoch_number = clean_epoch_entry(latest_epoch_id)
return latest_epoch_number
def get_block_number_from_id(block_id):
query = fetch_query('block_lookup')
variables = {'block_id': block_id}
block_result = graph_query_call(eden_governance_api, query, variables)
eden_block_number = int(block_result['data']['block']['number'])
return eden_block_number
def eden_block_call():
last_block = 0
last_block_current = get_latest_eth_block()
    eden_blocks_df = pd.DataFrame()
import unittest
import pdb
import logging
import numpy as np
import pandas as pd
from ais import analysis
TEST_VESSEL_LOCATION_FILE = "testdata/vessellocations.csv"
TEST_VESSEL_METADATA_FILE = "testdata/vesselmetadatas.csv"
TEST_ICE_CONDITION_FILE = "testdata/iceconditions.csv"
# some vessels and icebreakers in the dataset
AURA = 230601000
BBC_VIRGINIA=305463000
KIISLA=230956000
SISU=230289000
YMER=265066000
class TestAnalysis(unittest.TestCase):
@classmethod
def setUpClass(self):
pd.set_option('display.width', 240)
self.vl = pd.read_csv(TEST_VESSEL_LOCATION_FILE, parse_dates = ['timestamp'])
self.vm = pd.read_csv(TEST_VESSEL_METADATA_FILE, parse_dates = ['timestamp'])
self.ice = pd.read_csv(TEST_ICE_CONDITION_FILE, parse_dates = ['timestamp'])
def test_number_of_observations(self):
self.assertEqual(len(self.vl), 99063)
def test_append_sudden_stopping(self):
df = pd.DataFrame(data = {'timestamp': ['2013-01-01 00:00:00', '2013-01-01 00:00:01', '2013-01-01 00:00:02', '2013-01-01 00:00:03'],
'mmsi': [123, 123, 123, 123],
'sog': [10, 10, 7, 0]})
df['timestamp'] = pd.to_datetime(df['timestamp'])
analysis.append_sudden_stopping(df)
self.assertTrue(df.iloc[3]['sudden_stopping'])
self.assertTrue((df.iloc[0:2]['sudden_stopping'] == False).all())
def test_append_sudden_stopping_continuous(self):
df = pd.DataFrame(data = {'timestamp': ['2013-01-01 00:00:00', '2013-01-01 00:00:01', '2013-01-01 00:00:02', '2013-01-01 00:00:03', '2013-01-01 00:00:04'],
'mmsi': [123, 123, 123, 123, 123],
'sog': [12, 12, 13, 0, 0]})
df['timestamp'] = pd.to_datetime(df['timestamp'])
analysis.append_sudden_stopping(df)
self.assertTrue(df.iloc[3]['sudden_stopping'])
self.assertTrue((df.iloc[0:2]['sudden_stopping'] == False).all())
self.assertFalse(df.iloc[4]['sudden_stopping'])
def test_append_sudden_stopping_sample_data(self):
vl = self.vl
vl = vl[(vl['timestamp'] >= '2018-03-19 12:45:00') & (vl['timestamp'] < '2018-03-19 13:00:00')]
analysis.append_sudden_stopping(vl)
self.assertEqual(len(vl[vl['sudden_stopping'] == True]), 4)
@staticmethod
def create_vessel_meta_and_location_data(mmsi, time_offset='0 Seconds'):
vm = pd.DataFrame(data = {'timestamp': pd.to_datetime(['2013-01-01 00:00:00.102', '2013-01-01 00:00:00.104', '2013-01-01 00:00:00.112']) + pd.to_timedelta(time_offset),
'mmsi': [mmsi, mmsi, mmsi],
'name': ['name1_' + str(mmsi), 'name2_' + str(mmsi), 'name3_' + str(mmsi)],
'ship_type': [1, 1, 1],
'callsign': ['ABCD', 'ABCD', 'ABCD'],
'imo': [456789, 456789, 456789],
'destination': ['KEMI', 'KEMI', 'KEMI'],
'eta': [12345, 12345, 12345],
'draught': [45, 45, 45],
'pos_type': [1, 1, 1],
'reference_point_a': [100, 100, 100],
'reference_point_b': [10, 10, 10],
'reference_point_c': [20, 20, 20],
'reference_point_d': [25, 25, 25]})
vl = pd.DataFrame(data = {'timestamp': pd.to_datetime(['2013-01-01 00:00:00.100', '2013-01-01 00:00:00.101', '2013-01-01 00:00:00.103', '2013-01-01 00:00:00.105', '2013-01-01 00:00:00.106']) + pd.to_timedelta(time_offset),
'mmsi': [mmsi, mmsi, mmsi, mmsi, mmsi],
'lon': [23.2, 23.2, 23.2, 23.2, 23.2],
'lat': [65.1, 65.1, 65.1, 65.1, 65.1],
'sog': [10, 10, 10, 10, 10],
'cog': [120, 120, 120, 120, 120],
'heading': [121, 121, 121, 121, 121]})
return vm, vl
def test_merge_location_and_metadata(self):
vm, vl = self.create_vessel_meta_and_location_data(123)
df = analysis.merge_vessel_meta_and_location_data(vm, vl)
self.assertEqual(len(df), 3)
self.assertEqual(df.name.isin(['name1_123', 'name2_123']).any(), True)
self.assertEqual(~df.name.isin(['name3_123']).any(), True)
def test_merge_multiple_location_and_metadata(self):
vm, vl = self.create_vessel_meta_and_location_data(123, '0 Seconds')
vm2, vl2 = self.create_vessel_meta_and_location_data(456, '10 Seconds')
vm = vm.append(vm2)
vl = vl.append(vl2)
df = analysis.merge_vessel_meta_and_location_data(vm, vl)
df = df.sort_values(by=['timestamp', 'mmsi'])
self.assertEqual(len(df), 6)
self.assertEqual(df.name.isin(['name1_123', 'name2_123']).any(), True)
self.assertEqual(~df.name.isin(['name3_123']).any(), True)
def test_merge_location_and_ice_condition(self):
        vl = pd.DataFrame(data = {'timestamp': pd.to_datetime(['2018-03-21 00:00:00.100', '2018-03-21 00:00:00.101', '2018-03-21 00:00:00.103', '2018-03-21 00:00:00.105', '2018-03-21 00:00:00.106'])
import numpy as np
import pandas as pd
import networkx as nx
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import cosine_similarity
from bokeh.io import show
from bokeh.plotting import figure
from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, LinearColorMapper, ColumnDataSource, LabelSet, SaveTool, ColorBar, BasicTicker
from bokeh.models.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes
from bokeh.palettes import Spectral8
def visualize_sentences(vecs, sentences, palette="Viridis256"):
tsne = TSNE(n_components=2)
tsne_results = tsne.fit_transform(vecs)
df = pd.DataFrame(columns=['x', 'y', 'sentence'])
df['x'], df['y'], df['sentence'] = tsne_results[:, 0], tsne_results[:, 1], sentences
source = ColumnDataSource(ColumnDataSource.from_df(df))
labels = LabelSet(x="x", y="y", text="sentence", y_offset=8,
text_font_size="12pt", text_color="#555555",
source=source, text_align='center')
color_mapper = LinearColorMapper(palette=palette, low=min(tsne_results[:, 1]), high=max(tsne_results[:, 1]))
plot = figure(plot_width=900, plot_height=900)
plot.scatter("x", "y", size=12, source=source, color={'field': 'y', 'transform': color_mapper}, line_color=None, fill_alpha=0.8)
plot.add_layout(labels)
show(plot, notebook_handle=True)
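def _visualize_sentences_demo():
    """Hedged usage sketch, not in the original module: dummy sentences and random
    vectors, only to show the expected call shape of visualize_sentences."""
    sentences = ["sentence %d" % i for i in range(40)]
    vecs = np.random.RandomState(0).randn(len(sentences), 16)  # stand-in embeddings
    visualize_sentences(vecs, sentences)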
"""
Visualize homonyms (2d vector space)
Inspired by:
https://github.com/hengluchang/visualizing_contextual_vectors/blob/master/elmo_vis.py
"""
def visualize_homonym(homonym, tokenized_sentences, vecs, model_name, palette="Viridis256"):
# process sentences
token_list, processed_sentences = [], []
for tokens in tokenized_sentences:
token_list.extend(tokens)
sentence = []
for token in tokens:
if model_name == "bert":
processed_token = token.replace("##", "")
else:
processed_token = token
if token == homonym:
processed_token = "\"" + processed_token + "\""
sentence.append(processed_token)
processed_sentences.append(' '.join(sentence))
# dimension reduction
tsne = TSNE(n_components=2)
tsne_results = tsne.fit_transform(vecs[1:])
# only plot the word representation of interest
interest_vecs, idx = np.zeros((len(tokenized_sentences), 2)), 0
for word, vec in zip(token_list, tsne_results):
if word == homonym:
interest_vecs[idx] = vec
idx += 1
    df = pd.DataFrame(columns=['x', 'y', 'annotation'])
import io
import os
import time
import gym
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from datetime import datetime
from threading import Thread
from rl_visualization.app import start_app
from math import sqrt, ceil
sns.set(style='whitegrid')
sns.set_context('paper')
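# Hedged usage sketch, not part of the original module: it only illustrates the intended
# call pattern of the wrapper defined below; "CartPole-v1" and the constant plot are
# arbitrary examples.
def _visualization_env_demo():
    env = VisualizationEnv(gym.make("CartPole-v1"), episodic=True)
    env.add_plot("constant", lambda: 1.0)
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    return obs, reward, done, info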
class VisualizationEnv(gym.Wrapper):
def __init__(self, env, agent=None, steps_lookback=1000, episodic=True, features_names=None, actions_names=None, refresh_time=20, path='./logs'):
"""Gym Env wrapper for visualization
Args:
env (gym.Env): Gym Env to be wrapped
"""
super().__init__(env)
self.env = env
self.agent = agent
self.steps_lookback = steps_lookback
self.episodic = episodic
self.user_plots = {}
self.user_plots_values = {}
if isinstance(self.observation_space, gym.spaces.Discrete):
self.state_dim = self.observation_space.n
elif isinstance(self.observation_space, gym.spaces.Box):
self.state_dim = self.observation_space.shape[0]
else:
exit('Observation space not supported.')
if isinstance(self.action_space, gym.spaces.Discrete):
self.action_dim = self.action_space.n
elif isinstance(self.action_space, gym.spaces.Box):
self.action_dim = self.action_space.shape[0]
else:
exit('Action space not supported')
if features_names is not None:
self.features_names = features_names
else:
self.features_names = ['feature_'+str(i) for i in range(self.state_dim)]
if actions_names is not None:
self.actions_names = actions_names
else:
self.actions_names = ['action'+str(i) for i in range(self.action_dim)]
if not os.path.exists(path):
os.mkdir(path)
self.filepath = os.path.join(path, 'rl_vis' + str(datetime.now()).split('.')[0] + "." + 'csv')
self.refresh_time = refresh_time
self.delay = 0
self.experiences = []
self.epsilon = []
self.sa_counter = Counter()
self.obs = None
self.start_app()
def set_agent(self, agent):
self.agent = agent
def step(self, action):
next_obs, reward, done, info = self.env.step(action)
if self.delay > 0:
time.sleep(self.delay)
self.experiences.append((self.obs, action, reward, next_obs, done))
if self.agent is not None and hasattr(self.agent, 'q_table'):
self.sa_counter.update([(self.env.encode(self.obs), action)])
self.obs = next_obs
for plot in self.user_plots:
self.user_plots_values[plot].append(self.user_plots[plot]())
return next_obs, reward, done, info
def reset(self):
self.obs = self.env.reset()
return self.obs
def start_app(self):
self.app_process = Thread(target=start_app, args=(self,))
self.app_process.start()
def get_available_plots(self):
plots = []
if len(self.experiences) == 0:
return plots
if self.agent is not None and hasattr(self.agent, 'q_table'):
plots.append('Q-table')
plots.append('Visit Count')
self.q_table_to_df()
plots.append('Rewards')
if self.episodic:
plots.append('Episode Rewards')
plots.extend(['Features Distributions', 'Actions Distributions'])
plots.extend(self.user_plots.keys())
return plots
def add_plot(self, title, get_func):
self.user_plots[title] = get_func
self.user_plots_values[title] = []
def get_userplot(self, title):
f, ax = plt.subplots(figsize=(14, 8))
plt.title(title)
plt.xlabel('step')
plt.plot(self.user_plots_values[title])
plt.tight_layout()
bytes_image = io.BytesIO()
plt.savefig(bytes_image, format='png')
plt.close()
bytes_image.seek(0)
return bytes_image
def get_featuresdistribution(self):
f, ax = plt.subplots(figsize=(14, 8))
plt.title('Features Distribution')
d = []
for exp in self.experiences[-self.steps_lookback:]:
s = exp[0]
d.append({self.features_names[i]: s[i] for i in range(self.state_dim)})
df = pd.DataFrame(d)
n = ceil(sqrt(self.state_dim))
for i in range(self.state_dim):
plt.subplot(1 if n == self.state_dim else n, n, i+1)
sns.distplot(df[self.features_names[i]], hist=True, color="b", kde_kws={"shade": True})
plt.tight_layout()
return self.plot_to_bytes(plt)
def get_actionsdistribution(self):
f, ax = plt.subplots()
plt.title('Actions Distribution')
if not hasattr(self.experiences[0][1], '__len__'): # int, float or numpy.int
d = []
for exp in self.experiences[-self.steps_lookback:]:
a = exp[1]
d.append({'Action': self.actions_names[a]})
df = pd.DataFrame(d)
sns.catplot(x="Action", kind="count", data=df)
else:
d = []
for exp in self.experiences[-self.steps_lookback:]:
s = exp[1]
d.append({self.actions_names[i]: s[i] for i in range(self.action_dim)})
            df = pd.DataFrame(d)
"""A collection of functions that assist in validation/comparison of data and conditions.
"""
from collections.abc import Sized
from typing import List, Union, Callable, Type, Iterable
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_categorical
from helpsk.exceptions import * # pylint: disable=wildcard-import,unused-wildcard-import
from helpsk.utility import suppress_warnings
def any_none_nan(values: Union[List, np.ndarray, pd.Series, pd.DataFrame, object]) -> bool:
"""Can be used with a single value or a collection of values. Returns `True` if any item in `values` are
`None`, `np.Nan`, `pd.NA`, `pd.NaT` or if the length of `values` is `0`.
Args:
values:
A collection of values to check.
Returns:
bool - True if any item in `values` are None/np.NaN
"""
# pylint: disable=too-many-return-statements
if values is None or values is np.NaN or values is pd.NA or values is pd.NaT: # pylint: disable=nan-comparison
return True
if isinstance(values, Sized) and not isinstance(values, str) and len(values) == 0:
return True
if isinstance(values, pd.Series):
return values.isnull().any() or values.isna().any()
if isinstance(values, pd.DataFrame):
return values.isnull().any().any() or values.isna().any().any()
if isinstance(values, Iterable) and not isinstance(values, str):
if len(values) == 0:
return True
return any((any_none_nan(x) for x in values))
try:
if not isinstance(values, str) and None in values:
return True
except Exception: # pylint: disable=broad-except # noqa
pass
try:
if np.isnan(values).any():
return True
except TypeError:
return False
return False
def assert_not_none_nan(values: Union[List, np.ndarray, pd.Series, pd.DataFrame, object]) -> None:
"""Raises an HelpskAssertionError if any item in `values` are `None`, `np.Nan`, or if the length of
`values` is `0`.
For numeric types only.
Args:
values:
A collection of values to check.
"""
assert_false(any_none_nan(values), message='None/NaN Values Found')
def any_missing(values: Union[List, pd.Series, pd.DataFrame, object]) -> bool:
"""Same as `any_none_nan` but checks for empty strings
Args:
values:
A collection of values to check.
Returns:
bool - True if any item in `values` are None/np.NaN/''
"""
if any_none_nan(values):
return True
if isinstance(values, pd.Series):
return values.isin(['']).any() # noqa
if isinstance(values, pd.DataFrame):
return values.isin(['']).any().any() # noqa
if isinstance(values, str) and values.strip() == '':
return True
if isinstance(values, Iterable) and '' in values:
return True
return False
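def _any_missing_examples():
    """Hedged illustration, not in the original module, of the values any_missing flags."""
    assert any_missing(None)
    assert any_missing([1, 2, ''])
    assert any_missing(pd.Series([1.0, np.nan]))
    assert not any_missing(['a', 'b'])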
def assert_not_any_missing(values: Union[List, pd.Series, pd.DataFrame, object]) -> None:
"""Raises an HelpskAssertionError if any item in `values` are `None`, `np.Nan`, an empty string (i.e. '')
or if the length of `values` is `0`.
Args:
values:
A collection of values to check.
"""
assert_false(any_missing(values), message='Missing Values Found')
def any_duplicated(values: Union[List, np.ndarray, pd.Series]) -> bool:
"""Returns `True` if any items in `values` are duplicated.
Args:
values: list, np.ndarray, pd.Series
A collection of values to check.
Returns:
bool
"""
return len(values) != len(set(values))
def assert_not_duplicated(values: Union[List, np.ndarray, pd.Series]) -> None:
"""Raises an HelpskAssertionError if any items in `values` are duplicated.
Args:
values: list, np.ndarray, pd.Series
A collection of values to check.
"""
assert_false(any_duplicated(values), message='Duplicate Values Found')
def assert_all(values: Union[List, np.ndarray, pd.Series, pd.DataFrame]) -> None:
"""Raises an `HelpskAssertionError` unless all items in `values` are `True`
Args:
values:
A collection of values to check.
"""
if isinstance(values, pd.Series):
if not values.all(): # noqa
raise HelpskAssertionError('Not All True')
elif isinstance(values, pd.DataFrame):
if not values.all().all(): # noqa
raise HelpskAssertionError('Not All True')
else:
if not all(values):
raise HelpskAssertionError('Not All True')
def assert_not_any(values: Union[List, np.ndarray, pd.Series, pd.DataFrame]) -> None:
"""Raises an `HelpskAssertionError` if any items in `values` are `True`
Args:
values:
A collection of values to check.
"""
if isinstance(values, pd.Series):
assert_false(values.any(), message='Found True') # noqa
elif isinstance(values, pd.DataFrame):
assert_false(values.any().any(), message='Found True') # noqa
else:
assert_false(any(values), message='Found True')
def assert_true(condition: bool, message: str = 'Condition Not True') -> None:
"""Raises an HelpskAssertionError if `condition` is not True
Args:
condition:
Something that evaluates to True/False
message:
Message passed to the HelpskAssertionError
"""
if not isinstance(condition, (bool, np.bool_)):
raise HelpskParamTypeError('condition should be boolean')
if not condition:
raise HelpskAssertionError(message)
def assert_false(condition: bool, message: str = 'Condition True') -> None:
"""Raises an HelpskAssertionError if `condition` is not False
Args:
condition: bool
Something that evaluates to True/False
message:
Message passed to the HelpskAssertionError
"""
if not isinstance(condition, (bool, np.bool_)):
raise HelpskParamTypeError('condition should be boolean')
if condition:
raise HelpskAssertionError(message)
def iterables_are_equal(iterable_a: Iterable, iterable_b: Iterable) -> bool:
"""Compares the equality of the values of two iterables.
This function will generally give the same result as list equality (e.g. `[x, y, z] == [x, y, z]`)
However, in some strange scenarios, `==` will return `False` where it doesn't seem like it should
For example:
```
temp = pd.DataFrame({'col_a': [np.nan, 1.0]})
temp.col_a.tolist() == [np.nan, 1.0] # returns False. Why??
iterables_are_equal(temp.col_a.tolist(), [np.nan, 1]) # returns True
[np.nan, 1.0] == [np.nan, 1.0] # returns True
    ```

    Also, when comparing a series with an ordered Categorical whose values are the same,
    pd.Series.equals() will return False if the categories have a different order. But we only care
    whether the values are the same, so this function will return True.
Args:
iterable_a:
an iterable to equate to iterable_b
iterable_b:
an iterable to equate to iterable_a
Returns:
True if iterable_a is equal to iterable_b
"""
# seems to be confusion and inconsistencies across stack overflow on how to properly check for category
# so this might be overkill but not exactly sure
# def is_categorical(series):
# if isinstance(series, (pd.Categorical, pd.CategoricalDtype)):
# return True
# if isinstance(series, pd.Series):
# return series.dtype.name == 'category'
# return False
with suppress_warnings():
# if either list-like structure is categorical, then we need to convert both to unordered categorical
if is_categorical(iterable_a) or is_categorical(iterable_b):
            iterable_a = pd.Categorical(iterable_a, ordered=False)
from __future__ import annotations
from typing import List, Optional, Sequence, Tuple
import numpy as np
import pandas as pd
from pandas.util._decorators import Appender, Substitution
from scipy import stats
from statsmodels.iolib.summary import Summary, fmt_2cols, fmt_params
from statsmodels.iolib.table import SimpleTable
from statsmodels.regression.linear_model import OLS, RegressionResults
import arch.covariance.kernel as lrcov
from arch.typing import ArrayLike1D, ArrayLike2D, Float64Array, Literal, UnitRootTrend
from arch.unitroot._engle_granger import EngleGrangerTestResults, engle_granger
from arch.unitroot._phillips_ouliaris import (
CriticalValueWarning,
PhillipsOuliarisTestResults,
phillips_ouliaris,
)
from arch.unitroot._shared import (
KERNEL_ERR,
KERNEL_ESTIMATORS,
_check_cointegrating_regression,
_check_kernel,
_cross_section,
)
from arch.unitroot.unitroot import SHORT_TREND_DESCRIPTION
from arch.utility.array import ensure2d
from arch.utility.io import pval_format, str_format
from arch.utility.timeseries import add_trend
from arch.vendor import cached_property
__all__ = [
"engle_granger",
"EngleGrangerTestResults",
"DynamicOLS",
"DynamicOLSResults",
"phillips_ouliaris",
"PhillipsOuliarisTestResults",
"CriticalValueWarning",
]
class _CommonCointegrationResults(object):
def __init__(
self,
params: pd.Series,
cov: pd.DataFrame,
resid: pd.Series,
kernel_est: lrcov.CovarianceEstimator,
num_x: int,
trend: UnitRootTrend,
df_adjust: bool,
r2: float,
adj_r2: float,
estimator_type: str,
):
self._params = params
self._cov = cov
self._resid = resid
self._bandwidth = kernel_est.bandwidth
self._kernel = kernel_est.__class__.__name__
self._kernel_est = kernel_est
self._num_x = num_x
self._trend = trend
self._df_adjust = df_adjust
self._ci_size = params.shape[0]
self._rsquared = r2
self._rsquared_adj = adj_r2
self._estimator_type = estimator_type
@property
def params(self) -> pd.Series:
"""The estimated parameters of the cointegrating vector"""
return self._params.iloc[: self._ci_size]
@cached_property
def std_errors(self) -> pd.Series:
"""
Standard errors of the parameters in the cointegrating vector
"""
se = np.sqrt(np.diag(self.cov))
return pd.Series(se, index=self.params.index, name="std_errors")
@cached_property
def tvalues(self) -> pd.Series:
"""
T-statistics of the parameters in the cointegrating vector
"""
return pd.Series(self.params / self.std_errors, name="tvalues")
@cached_property
def pvalues(self) -> pd.Series:
"""
P-value of the parameters in the cointegrating vector
"""
return pd.Series(2 * (1 - stats.norm.cdf(np.abs(self.tvalues))), name="pvalues")
@property
def cov(self) -> pd.DataFrame:
"""The estimated parameter covariance of the cointegrating vector"""
return self._cov.iloc[: self._ci_size, : self._ci_size]
@property
def resid(self) -> pd.Series:
"""The model residuals"""
return self._resid
@property
def kernel(self) -> str:
"""The kernel used to estimate the covariance"""
return self._kernel
@property
def bandwidth(self) -> float:
"""The bandwidth used in the parameter covariance estimation"""
return self._bandwidth
@property
def rsquared(self) -> float:
"""The model R²"""
return self._rsquared
@property
def rsquared_adj(self) -> float:
"""The degree-of-freedom adjusted R²"""
return self._rsquared_adj
@cached_property
def _cov_est(self) -> lrcov.CovarianceEstimate:
r = np.asarray(self._resid)
kern_class = self._kernel_est.__class__
bw = self._bandwidth
force_int = self._kernel_est.force_int
cov_est = kern_class(r, bandwidth=bw, center=False, force_int=force_int)
return cov_est.cov
@property
def _df_scale(self) -> float:
if not self._df_adjust:
return 1.0
nobs = self._resid.shape[0]
nvar = self.params.shape[0]
return nobs / (nobs - nvar)
@property
def residual_variance(self) -> float:
r"""
The variance of the regression residual.
Returns
-------
float
The estimated residual variance.
Notes
-----
The residual variance only accounts for the short-run variance of the
residual and does not account for any autocorrelation. It is defined
as
.. math::
\hat{\sigma}^2 = T^{-1} \sum _{t=p}^{T-q} \hat{\epsilon}_t^2
If `df_adjust` is True, then the estimator is rescaled by T/(T-m) where
m is the number of regressors in the model.
"""
return self._df_scale * self._cov_est.short_run[0, 0]
@property
def long_run_variance(self) -> float:
"""
The long-run variance of the regression residual.
Returns
-------
float
The estimated long-run variance of the residual.
The long-run variance is estimated from the model residuals
using the same kernel used to estimate the parameter
covariance.
If `df_adjust` is True, then the estimator is rescaled by T/(T-m) where
m is the number of regressors in the model.
"""
return self._df_scale * self._cov_est.long_run[0, 0]
@staticmethod
def _top_table(
top_left: Sequence[Tuple[str, str]],
top_right: Sequence[Tuple[str, str]],
title: str,
) -> SimpleTable:
stubs = []
vals = []
for stub, val in top_left:
stubs.append(stub)
vals.append([val])
table = SimpleTable(vals, txt_fmt=fmt_2cols, title=title, stubs=stubs)
fmt = fmt_2cols.copy()
fmt["data_fmts"][1] = "%18s"
top_right = [("%-21s" % (" " + k), v) for k, v in top_right]
stubs = []
vals = []
for stub, val in top_right:
stubs.append(stub)
vals.append([val])
table.extend_right(SimpleTable(vals, stubs=stubs))
return table
def _top_right(self) -> List[Tuple[str, str]]:
top_right = [
("No. Observations:", str(self._resid.shape[0])),
("R²:", str_format(self.rsquared)),
("Adjusted. R²:", str_format(self.rsquared_adj)),
("Residual Variance:", str_format(self.residual_variance)),
("Long-run Variance:", str_format(self.long_run_variance)),
("", ""),
]
return top_right
@staticmethod
def _param_table(
params: Float64Array,
se: Float64Array,
tstats: Float64Array,
pvalues: Float64Array,
stubs: Sequence[str],
title: str,
) -> SimpleTable:
ci = params[:, None] + se[:, None] * stats.norm.ppf([[0.025, 0.975]])
param_data = np.column_stack([params, se, tstats, pvalues, ci])
data = []
for row in param_data:
txt_row = []
for i, v in enumerate(row):
f = str_format
if i == 3:
f = pval_format
txt_row.append(f(v))
data.append(txt_row)
header = ["Parameter", "Std. Err.", "T-stat", "P-value", "Lower CI", "Upper CI"]
table = SimpleTable(
data, stubs=stubs, txt_fmt=fmt_params, headers=header, title=title
)
return table
def summary(self) -> Summary:
"""
Summary of the model, containing estimated parameters and std. errors
Returns
-------
Summary
A summary instance with method that support export to text, csv
or latex.
"""
if self._bandwidth != int(self._bandwidth):
bw = str_format(self._bandwidth)
else:
bw = str(int(self._bandwidth))
top_left = [
("Trend:", SHORT_TREND_DESCRIPTION[self._trend]),
("Kernel:", str(self._kernel)),
("Bandwidth:", bw),
("", ""),
("", ""),
("", ""),
]
top_right = self._top_right()
smry = Summary()
title = self._estimator_type
table = self._top_table(top_left, top_right, title)
# Top Table
# Parameter table
smry.tables.append(table)
params = np.asarray(self.params)
stubs = list(self.params.index)
se = np.asarray(self.std_errors)
tstats = np.asarray(self.tvalues)
pvalues = np.asarray(self.pvalues)
title = "Cointegrating Vector"
table = self._param_table(params, se, tstats, pvalues, stubs, title)
smry.tables.append(table)
return smry
class DynamicOLSResults(_CommonCointegrationResults):
"""
Estimation results for Dynamic OLS models
Parameters
----------
params : Series
The estimated model parameters.
cov : DataFrame
The estimated parameter covariance.
resid : Series
The model residuals.
lags : int
The number of lags included in the model.
leads : int
The number of leads included in the model.
cov_type : str
The type of the parameter covariance estimator used.
kernel_est : CovarianceEstimator
The covariance estimator instance used to estimate the parameter
covariance.
reg_results : RegressionResults
Regression results from fitting statsmodels OLS.
df_adjust : bool
Whether to degree of freedom adjust the estimator.
"""
def __init__(
self,
params: pd.Series,
cov: pd.DataFrame,
resid: pd.Series,
lags: int,
leads: int,
cov_type: str,
kernel_est: lrcov.CovarianceEstimator,
num_x: int,
trend: UnitRootTrend,
reg_results: RegressionResults,
df_adjust: bool,
) -> None:
super().__init__(
params,
cov,
resid,
kernel_est,
num_x,
trend,
df_adjust,
r2=reg_results.rsquared,
adj_r2=reg_results.rsquared_adj,
estimator_type="Dynamic OLS",
)
self._leads = leads
self._lags = lags
self._cov_type = cov_type
self._ci_size = params.shape[0] - self._num_x * (leads + lags + 1)
@property
def full_params(self) -> pd.Series:
"""The complete set of parameters, including leads and lags"""
return self._params
@property
def full_cov(self) -> pd.DataFrame:
"""
Parameter covariance of the all model parameters, incl. leads and lags
"""
return self._cov
@property
def lags(self) -> int:
"""The number of lags included in the model"""
return self._lags
@property
def leads(self) -> int:
"""The number of leads included in the model"""
return self._leads
@property
def cov_type(self) -> str:
"""The type of parameter covariance estimator used"""
return self._cov_type
@property
def _df_scale(self) -> float:
if not self._df_adjust:
return 1.0
nobs = self._resid.shape[0]
nvar = self.full_params.shape[0]
return nobs / (nobs - nvar)
def summary(self, full: bool = False) -> Summary:
"""
Summary of the model, containing estimated parameters and std. errors
Parameters
----------
full : bool, default False
Flag indicating whether to include all estimated parameters
(True) or only the parameters of the cointegrating vector
Returns
-------
Summary
A summary instance with method that support export to text, csv
or latex.
"""
if self._bandwidth != int(self._bandwidth):
bw = str_format(self._bandwidth)
else:
bw = str(int(self._bandwidth))
top_left = [
("Trend:", SHORT_TREND_DESCRIPTION[self._trend]),
("Leads:", str(self._leads)),
("Lags:", str(self._lags)),
("Cov Type:", str(self._cov_type)),
("Kernel:", str(self._kernel)),
("Bandwidth:", bw),
]
top_right = self._top_right()
smry = Summary()
typ = "Cointegrating Vector" if not full else "Model"
title = f"Dynamic OLS {typ} Summary"
table = self._top_table(top_left, top_right, title)
# Top Table
# Parameter table
smry.tables.append(table)
if full:
params = np.asarray(self.full_params)
stubs = list(self.full_params.index)
se = np.sqrt(np.diag(self.full_cov))
tstats = params / se
pvalues = 2 * (1 - stats.norm.cdf(np.abs(tstats)))
else:
params = np.asarray(self.params)
stubs = list(self.params.index)
se = np.asarray(self.std_errors)
tstats = np.asarray(self.tvalues)
pvalues = np.asarray(self.pvalues)
title = "Cointegrating Vector" if not full else "Model Parameters"
assert isinstance(se, np.ndarray)
table = self._param_table(params, se, tstats, pvalues, stubs, title)
smry.tables.append(table)
return smry
class DynamicOLS(object):
r"""
Dynamic OLS (DOLS) cointegrating vector estimation
Parameters
----------
y : array_like
The left-hand-side variable in the cointegrating regression.
x : array_like
The right-hand-side variables in the cointegrating regression.
trend : {"n","c","ct","ctt"}, default "c"
Trend to include in the cointegrating regression. Trends are:
* "n": No deterministic terms
* "c": Constant
* "ct": Constant and linear trend
* "ctt": Constant, linear and quadratic trends
lags : int, default None
The number of lags to include in the model. If None, the optimal
number of lags is chosen using method.
leads : int, default None
The number of leads to include in the model. If None, the optimal
number of leads is chosen using method.
common : bool, default False
Flag indicating that lags and leads should be restricted to the same
value. When common is None, lags must equal leads and max_lag must
equal max_lead.
max_lag : int, default None
The maximum lag to consider. See Notes for value used when None.
max_lead : int, default None
The maximum lead to consider. See Notes for value used when None.
method : {"aic","bic","hqic"}, default "bic"
The method used to select lag length when lags or leads is None.
* "aic" - Akaike Information Criterion
* "hqic" - Hannan-Quinn Information Criterion
* "bic" - Schwartz/Bayesian Information Criterion
Notes
-----
The cointegrating vector is estimated from the regression
.. math ::
Y_t = D_t \delta + X_t \beta + \Delta X_{t} \gamma
+ \sum_{i=1}^p \Delta X_{t-i} \kappa_i
+ \sum _{j=1}^q \Delta X_{t+j} \lambda_j + \epsilon_t
where p is the lag length and q is the lead length. :math:`D_t` is a
vector containing the deterministic terms, if any. All specifications
include the contemporaneous difference :math:`\Delta X_{t}`.
When lag lengths are not provided, the optimal lag length is chosen to
minimize an Information Criterion of the form
.. math::
\ln\left(\hat{\sigma}^2\right) + k\frac{c}{T}
where c is 2 for Akaike, :math:`2\ln\ln T` for Hannan-Quinn and
:math:`\ln T` for Schwartz/Bayesian.
See [1]_ and [2]_ for further details.
References
----------
    .. [1] Saikkonen, P. (1992). Estimation and testing of cointegrated
systems by an autoregressive approximation. Econometric theory,
8(1), 1-27.
    .. [2] Stock, J. H., & Watson, M. W. (1993). A simple estimator of
cointegrating vectors in higher order integrated systems.
Econometrica: Journal of the Econometric Society, 783-820.
"""
def __init__(
self,
y: ArrayLike1D,
x: ArrayLike2D,
trend: UnitRootTrend = "c",
lags: Optional[int] = None,
leads: Optional[int] = None,
common: bool = False,
max_lag: Optional[int] = None,
max_lead: Optional[int] = None,
method: Literal["aic", "bic", "hqic"] = "bic",
) -> None:
setup = _check_cointegrating_regression(y, x, trend)
self._y = setup.y
self._x = setup.x
self._trend = setup.trend
self._lags = lags
self._leads = leads
self._max_lag = max_lag
self._max_lead = max_lead
self._method = method
self._common = bool(common)
self._y_df = pd.DataFrame(self._y)
self._check_inputs()
def _check_inputs(self) -> None:
"""Validate the inputs"""
if not isinstance(self._method, str) or self._method.lower() not in (
"aic",
"bic",
"hqic",
):
raise ValueError('method must be one of "aic", "bic", or "hqic"')
max_lag = self._max_lag
self._max_lag = int(max_lag) if max_lag is not None else max_lag
max_lead = self._max_lead
self._max_lead = int(max_lead) if max_lead is not None else max_lead
self._leads = int(self._leads) if self._leads is not None else self._leads
self._lags = int(self._lags) if self._lags is not None else self._lags
if self._common and self._leads != self._lags:
raise ValueError(
"common is specified but leads and lags have different values"
)
if self._common and self._max_lead != self._max_lag:
raise ValueError(
"common is specified but max_lead and max_lag have different values"
)
max_ll = self._max_lead_lag()
obs_remaining = self._y.shape[0] - 1
obs_remaining -= max_ll if max_lag is None else max_lag
obs_remaining -= max_ll if max_lead is None else max_lead
if obs_remaining <= 0:
raise ValueError(
"max_lag and max_lead are too large for the amount of "
"data. The largest model specification in the search "
"cannot be estimated."
)
def _format_variables(
self, leads: int, lags: int
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Format the variables for the regression"""
x = self._x
y = self._y_df
delta_x = x.diff()
data = [y, x]
for lag in range(-lags, leads + 1):
lag_data = delta_x.shift(-lag)
typ = "LAG" if lag < 0 else "LEAD"
lag_data.columns = [f"D.{c}.{typ}{abs(lag)}" for c in lag_data.columns]
if lag == 0:
lag_data.columns = [f"D.{c}" for c in lag_data.columns]
data.append(lag_data)
data_df: pd.DataFrame = pd.concat(data, axis=1).dropna()
lhs, rhs = data_df.iloc[:, :1], data_df.iloc[:, 1:]
nrhs = rhs.shape[1]
rhs = add_trend(rhs, trend=self._trend, prepend=True)
ntrend = rhs.shape[1] - nrhs
if ntrend:
nx = x.shape[1]
trend = rhs.iloc[:, :ntrend]
rhs = pd.concat(
[rhs.iloc[:, ntrend : ntrend + nx], trend, rhs.iloc[:, ntrend + nx :]],
axis=1,
)
return lhs, rhs
def _ic(self, resids: Float64Array, nparam: int) -> float:
"""Compute an info criterion"""
nobs = resids.shape[0]
sigma2 = float(resids.T @ resids / nobs)
if self._method == "aic":
penalty = 2.0
elif self._method == "hqic":
penalty = 2.0 * float(np.log(np.log(nobs)))
else: # bic
penalty = float(np.log(nobs))
return np.log(sigma2) + nparam * penalty / nobs
def _max_lead_lag(self) -> int:
nobs = self._y.shape[0]
return int(np.ceil(12.0 * (nobs / 100) ** (1 / 4)))
def _leads_and_lags(self) -> Tuple[int, int]:
"""Select the optimal number of leads and lags"""
if self._lags is not None and self._leads is not None:
return self._leads, self._lags
nobs = self._y.shape[0]
max_lead_lag = int(np.ceil(12.0 * (nobs / 100) ** (1 / 4)))
if self._lags is None:
max_lag = max_lead_lag if self._max_lag is None else self._max_lag
min_lag = 0
else:
min_lag = max_lag = self._lags
if self._leads is None:
max_lead = max_lead_lag if self._max_lead is None else self._max_lead
min_lead = 0
else:
min_lead = max_lead = self._leads
variables = self._format_variables(max_lead, max_lag)
lhs = np.asarray(variables[0])
rhs = np.asarray(variables[1])
nx = self._x.shape[1]
# +1 to account for the Delta X(t) (not a lead or a lag)
lead_lag_offset = rhs.shape[1] - (max_lead + max_lag + 1) * nx
always_loc = np.arange(lead_lag_offset)
best_ic = np.inf
best_leads_and_lags = (0, 0)
for lag in range(min_lag, max_lag + 1):
for lead in range(min_lead, max_lead + 1):
if self._common and lag != lead:
continue
lag_start = max_lag - lag
# +1 to get LAG0 in all regressions
lead_end = max_lag + 1 + lead
lead_lag_locs = np.arange(lag_start * nx, lead_end * nx)
lead_lag_locs += lead_lag_offset
locs = np.r_[always_loc, lead_lag_locs]
_rhs = rhs[:, locs]
params = np.linalg.lstsq(_rhs, lhs, rcond=None)[0]
resid = np.squeeze(lhs - _rhs @ params)
ic = self._ic(resid, params.shape[0])
if ic < best_ic:
best_ic = ic
best_leads_and_lags = (lead, lag)
return best_leads_and_lags
def fit(
self,
cov_type: Literal[
"unadjusted", "homoskedastic", "robust", "kernel"
] = "unadjusted",
kernel: str = "bartlett",
bandwidth: Optional[int] = None,
force_int: bool = False,
df_adjust: bool = False,
) -> DynamicOLSResults:
r"""
Estimate the Dynamic OLS regression
Parameters
----------
cov_type : str, default "unadjusted"
Either "unadjusted" (or is equivalent "homoskedastic") or "robust"
(or its equivalent "kernel").
kernel : str, default "bartlett"
The string name of any of any known kernel-based long-run
covariance estimators. Common choices are "bartlett" for the
Bartlett kernel (Newey-West), "parzen" for the Parzen kernel
and "quadratic-spectral" for the Quadratic Spectral kernel.
bandwidth : int, default None
The bandwidth to use. If not provided, the optimal bandwidth is
estimated from the data. Setting the bandwidth to 0 and using
"unadjusted" produces the classic OLS covariance estimator.
Setting the bandwidth to 0 and using "robust" produces White's
covariance estimator.
force_int : bool, default False
Whether the force the estimated optimal bandwidth to be an integer.
df_adjust : bool, default False
Whether the adjust the parameter covariance to account for the
number of parameters estimated in the regression. If true, the
parameter covariance estimator is multiplied by T/(T-k) where
k is the number of regressors in the model.
Returns
-------
DynamicOLSResults
The estimation results.
See Also
--------
arch.unitroot.cointegration.engle_granger
Cointegration testing using the Engle-Granger methodology
statsmodels.regression.linear_model.OLS
Ordinal Least Squares regression.
Notes
-----
When using the unadjusted covariance, the parameter covariance is
estimated as
.. math::
T^{-1} \hat{\sigma}^2_{HAC} \hat{\Sigma}_{ZZ}^{-1}
where :math:`\hat{\sigma}^2_{HAC}` is an estimator of the long-run
variance of the regression error and
:math:`\hat{\Sigma}_{ZZ}=T^{-1}Z'Z`. :math:`Z_t` is a vector the
includes all terms in the regression (i.e., deterministics,
cross-sectional, leads and lags) When using the robust covariance,
the parameter covariance is estimated as
.. math::
T^{-1} \hat{\Sigma}_{ZZ}^{-1} \hat{S}_{HAC} \hat{\Sigma}_{ZZ}^{-1}
where :math:`\hat{S}_{HAC}` is a Heteroskedasticity-Autocorrelation
Consistent estimator of the covariance of the regression scores
:math:`Z_t\epsilon_t`.
"""
leads, lags = self._leads_and_lags()
# TODO: Rank check and drop??
lhs, rhs = self._format_variables(leads, lags)
mod = OLS(lhs, rhs)
res = mod.fit()
coeffs = np.asarray(res.params)
resid = lhs.squeeze() - (rhs @ coeffs).squeeze()
resid.name = "resid"
cov, est = self._cov(
cov_type, kernel, bandwidth, force_int, df_adjust, rhs, resid
)
params = pd.Series(np.squeeze(coeffs), index=rhs.columns, name="params")
num_x = self._x.shape[1]
return DynamicOLSResults(
params,
cov,
resid,
lags,
leads,
cov_type,
est,
num_x,
self._trend,
res,
df_adjust,
)
@staticmethod
def _cov(
cov_type: Literal["unadjusted", "homoskedastic", "robust", "kernel"],
kernel: str,
bandwidth: Optional[int],
force_int: bool,
df_adjust: bool,
rhs: pd.DataFrame,
resids: pd.Series,
) -> Tuple[pd.DataFrame, lrcov.CovarianceEstimator]:
"""Estimate the covariance"""
kernel = kernel.lower().replace("-", "").replace("_", "")
if kernel not in KERNEL_ESTIMATORS:
raise ValueError(KERNEL_ERR)
x = np.asarray(rhs)
eps = ensure2d(np.asarray(resids), "eps")
nobs, nx = x.shape
sigma_xx = x.T @ x / nobs
sigma_xx_inv = np.linalg.inv(sigma_xx)
kernel_est = KERNEL_ESTIMATORS[kernel]
scale = nobs / (nobs - nx) if df_adjust else 1.0
if cov_type in ("unadjusted", "homoskedastic"):
est = kernel_est(eps, bandwidth, center=False, force_int=force_int)
sigma2 = np.squeeze(est.cov.long_run)
cov = (scale * sigma2) * sigma_xx_inv / nobs
elif cov_type in ("robust", "kernel"):
scores = x * eps
est = kernel_est(scores, bandwidth, center=False, force_int=force_int)
s = est.cov.long_run
cov = scale * sigma_xx_inv @ s @ sigma_xx_inv / nobs
else:
raise ValueError("Unknown cov_type")
cov_df = pd.DataFrame(cov, columns=rhs.columns, index=rhs.columns)
return cov_df, est
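def _dynamic_ols_demo():
    """Hedged usage sketch, not part of the library: simulate a crude cointegrated pair
    and fit DOLS. The data-generating process and all tuning choices are illustrative."""
    rs = np.random.RandomState(0)
    x = rs.standard_normal(500).cumsum()
    y = 0.5 + 1.5 * x + rs.standard_normal(500)
    data = pd.DataFrame({"y": y, "x": x})
    res = DynamicOLS(data.y, data[["x"]], trend="c").fit(cov_type="robust")
    return res.summary()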
class CointegrationAnalysisResults(_CommonCointegrationResults):
def __init__(
self,
params: pd.Series,
cov: pd.DataFrame,
resid: pd.Series,
omega_112: float,
kernel_est: lrcov.CovarianceEstimator,
num_x: int,
trend: UnitRootTrend,
df_adjust: bool,
rsquared: float,
rsquared_adj: float,
estimator_type: str,
):
super().__init__(
params,
cov,
resid,
kernel_est,
num_x,
trend,
df_adjust,
rsquared,
rsquared_adj,
estimator_type,
)
self._omega_112 = omega_112
@property
def long_run_variance(self) -> float:
"""
Long-run variance estimate used in the parameter covariance estimator
"""
return self._omega_112
COMMON_DOCSTRING = r"""
%(method)s cointegrating vector estimation.
Parameters
----------
y : array_like
The left-hand-side variable in the cointegrating regression.
x : array_like
The right-hand-side variables in the cointegrating regression.
trend : {{"n","c","ct","ctt"}}, default "c"
Trend to include in the cointegrating regression. Trends are:
* "n": No deterministic terms
* "c": Constant
* "ct": Constant and linear trend
* "ctt": Constant, linear and quadratic trends
x_trend : {None,"c","ct","ctt"}, default None
        Trends that affect the x-data but do not appear in the
cointegrating regression. x_trend must be at least as large as
trend, so that if trend is "ct", x_trend must be either "ct" or
"ctt".
Notes
-----
The cointegrating vector is estimated from the regressions
.. math::
Y_t & = D_{1t} \delta + X_t \beta + \eta_{1t} \\
X_t & = D_{1t} \Gamma_1 + D_{2t}\Gamma_2 + \epsilon_{2t} \\
\eta_{2t} & = \Delta \epsilon_{2t}
or if estimated in differences, the last two lines are
.. math::
\Delta X_t = \Delta D_{1t} \Gamma_1 + \Delta D_{2t} \Gamma_2 + \eta_{2t}
Define the vector of residuals as :math:`\eta = (\eta_{1t},\eta'_{2t})'`, and the
long-run covariance
.. math::
\Omega = \sum_{h=-\infty}^{\infty} E[\eta_t\eta_{t-h}']
and the one-sided long-run covariance matrix
.. math::
\Lambda_0 = \sum_{h=0}^\infty E[\eta_t\eta_{t-h}']
The covariance matrices are partitioned into a block form
.. math::
\Omega = \left[\begin{array}{cc}
\omega_{11} & \omega_{12} \\
\omega'_{12} & \Omega_{22}
\end{array} \right]
The cointegrating vector is then estimated using modified data
%(estimator)s
"""
CCR_METHOD = "Canonical Cointegrating Regression"
CCR_ESTIMATOR = r"""
.. math::
X^\star_t & = X_t - \hat{\Lambda}_2'\hat{\Sigma}^{-1}\hat{\eta}_t \\
Y^\star_t & = Y_t - (\hat{\Sigma}^{-1} \hat{\Lambda}_2 \hat{\beta}
+ \hat{\kappa})' \hat{\eta}_t
where :math:`\hat{\kappa} = (0,\hat{\Omega}_{22}^{-1}\hat{\Omega}'_{12})` and
the regression
.. math::
Y^\star_t = D_{1t} \delta + X^\star_t \beta + \eta^\star_{1t}
See [1]_ for further details.
References
----------
    .. [1] Park, J. Y. (1992). Canonical cointegrating regressions. Econometrica:
Journal of the Econometric Society, 119-143.
"""
FMOLS_METHOD = "Fully Modified OLS"
FMOLS_ESTIMATOR = r"""
.. math::
Y^\star_t = Y_t - \hat{\omega}_{12}\hat{\Omega}_{22}\hat{\eta}_{2t}
as
.. math::
\hat{\theta} = \left[\begin{array}{c}\hat{\gamma}_1 \\ \hat{\beta} \end{array}\right]
= \left(\sum_{t=2}^T Z_tZ'_t\right)^{-1}
\left(\sum_{t=2}^T Z_t Y^\star_t -
T \left[\begin{array}{c} 0 \\ \lambda^{\star\prime}_{12}
\end{array}\right]\right)
where the bias term is defined
.. math::
\lambda^\star_{12} = \hat{\lambda}_{12}
- \hat{\omega}_{12}\hat{\Omega}_{22}\hat{\omega}_{21}
See [1]_ for further details.
References
----------
.. [1] <NAME>., & <NAME>. (1990). Estimation and inference in models of
cointegration: A simulation study. Advances in Econometrics, 8(1989), 225-248.
"""
@Substitution(method=FMOLS_METHOD, estimator=FMOLS_ESTIMATOR)
@Appender(COMMON_DOCSTRING)
class FullyModifiedOLS(object):
def __init__(
self,
y: ArrayLike1D,
x: ArrayLike2D,
trend: UnitRootTrend = "c",
x_trend: Optional[UnitRootTrend] = None,
) -> None:
setup = _check_cointegrating_regression(y, x, trend)
self._y = setup.y
self._x = setup.x
self._trend = setup.trend
self._x_trend = x_trend
self._y_df = pd.DataFrame(self._y)
import funcy
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
from dateutil import parser
from tqdm import tqdm
from utils.helpers import *
from utils.plot import plot_joint_distribution
font = {
"size": 30
}
matplotlib.rc("font", **font)
pd.options.mode.chained_assignment = None
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MOST_RECENT_FILE = sorted(os.listdir(os.path.join(BASE_DIR, "data", "REDCap")))[-1]
REDCAP_FPATH = os.path.join(BASE_DIR, "data", "REDCap", MOST_RECENT_FILE)
SERIES_ID_FPATH = os.path.join(BASE_DIR, "data", "match_redcap_plataforma.csv")
SEGMENTATION_FPATH = os.path.join(BASE_DIR, "data", "inference_df.csv")
get_date_regex = r"ProjetoCOVIDAI_DATA_(?P<data>.*)_\d+.csv"
date_str = re.match(get_date_regex, MOST_RECENT_FILE).group("data")
dataset_date = parser.parse(date_str)
# Normalize name and CPF
df = pd.read_csv(REDCAP_FPATH)
df.nome = df.nome.apply(lambda s: to_normalized_string(s) if pd.notna(s) else s)
df.cpf = df.cpf.apply(lambda v: str(int(v)) if pd.notna(v) else v)
# Fill redcap_repeat_instrument missing data with "dados_pessoais_unico" since these
# rows are not filled automatically by the database
df.redcap_repeat_instrument = df.redcap_repeat_instrument.fillna("dados_pessoais_unico")
# Fill the missing hospitalization date with the date of admission to the ICU, when available
df.data_admissao_hospitalar = df.data_admissao_hospitalar.fillna(df.data_admissao_uti)
# Calculate length of stay based on hospitalization date and date of discharge or
# date of death
fill_length_of_stay = df.apply(
lambda row: calculate_length_of_stay(
row["data_admissao_hospitalar"],
row["data_alta_hospitalar"],
row["data_obito"]
),
axis=1
)
df.tempo_estadia_hospitalar = df.tempo_estadia_hospitalar.fillna(fill_length_of_stay)
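# Illustrative sketch only: the real `calculate_length_of_stay` comes from
# utils.helpers and is not shown in this file.  A plausible (assumed) version
# measures whole days between admission and whichever outcome date exists.
def _example_length_of_stay(admission, discharge, death):
    if pd.isna(admission):
        return np.nan
    end = discharge if pd.notna(discharge) else death
    if pd.isna(end):
        return np.nan
    return (parser.parse(end) - parser.parse(admission)).days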
# Calculate the date of discharge from ICU based on the date of admission
# in the ICU and length of stay in the ICU.
df["data_alta_uti"] = df.apply(
lambda row: sum_date_with_interval(
row["data_admissao_uti"],
row["tempo_estadia_uti"]
),
axis=1
)
# Calculate the date of removal of the ventilation based on the date of ventilation
# and the length of ventilation
df["data_remocao_ventilacao"] = df.apply(
lambda row: sum_date_with_interval(
row["data_ventilacao"],
row["tempo_ventilacao_mecanica"]
),
axis=1
)
# Calculate age and body mass index
df["idade"] = df.apply(
lambda row: calculate_age(
row["data_nasc"],
row["data_admissao_hospitalar"],
dataset_date
),
axis=1
)
df["imc"] = df.peso / (df.altura ** 2)
# Some of the rows have the platelet counts in a different unit and need to be
# multiplied by 1000
df.plaquetas = df.plaquetas.apply(lambda v: v * 1000 if v < 1000 else v)
############################## Finished processing the ordinary data ##############################
# Here we define variables useful for processing the rest of the data
cols_intermediate_outcomes = [
"data_sepse",
"sepse",
"data_sdra",
"sdra",
"data_falencia_cardiaca",
"falencia_cardiaca",
"data_choque_septico",
"choque_septico",
"data_coagulopatia",
"coagulopatia",
"data_iam",
"iam",
"data_ira",
"ira"
]
cols_personal_data = [
"nome",
"cpf",
"instituicao",
"data_nasc",
"idade",
"sexo",
"altura",
"peso",
"imc",
"alta",
"obito",
"data_admissao_hospitalar",
"data_admissao_uti",
"data_obito",
"data_alta_hospitalar",
"data_alta_uti",
"data_ventilacao",
"data_remocao_ventilacao",
"tempo_estadia_hospitalar",
"tempo_estadia_uti",
"tempo_ventilacao_mecanica"
] + cols_intermediate_outcomes
cols_comorbidities = [
"has",
"ieca_bra",
"dm",
"asma",
"tabagista",
"dpoc",
"cardiopatia",
"irc",
"neoplasia",
"aids",
"neutropenia"
]
cols_respiratory_comorbidities = [
"asma", "tabagista", "dpoc"
]
cols_cardiac_comorbidities = [
"has", "cardiopatia"
]
cols_dates = [
col for col in df.columns
if "data" in col and col not in
cols_personal_data + ["redcap_data_access_group"]
]
identity_map = {
0: 0,
1: 1
}
irc_map = {
1: "negativo",
2: "nao_dialitico",
3: "dialitico"
}
neoplasia_map = {
1: "negativo",
2: "primaria_ou_secundaria",
3: "outras"
}
map_comorbidities = {
"irc": irc_map,
"neoplasia": neoplasia_map
}
# Now we build a separate dataframe for saving personal data.
df_personal_data = df[df.redcap_repeat_instrument == "dados_pessoais_unico"]
# Discriminate patients that were admitted to the hospital and to the ICU. Also, discriminate those that
# were discharged and those who died.
df_personal_data["internacao"] = df_personal_data.data_admissao_hospitalar.notna()
df_personal_data["uti"] = df_personal_data.data_admissao_uti.notna()
df_personal_data["obito"] = df_personal_data.data_obito.notna()
df_personal_data["alta"] = df_personal_data.data_alta_hospitalar.notna()
df_personal_data = df_personal_data[
["record_id"] + cols_personal_data + cols_comorbidities
]
for col in cols_comorbidities:
df_personal_data[col] = df_personal_data[col].map(map_comorbidities.get(col, identity_map))
# Count the number of previous comorbidities each patient has.
df_personal_data["n_comorbidades"] = df_personal_data[cols_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_respiratorias"] = df_personal_data[cols_respiratory_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_cardiacas"] = df_personal_data[cols_cardiac_comorbidities].apply(count_comorbidities, axis=1)
############################## Finished processing the personal data ##############################
# Now we build separate dataframes for saving clinical, treatment, laboratorial, image and confirmatory data.
# Clinical dataframe
cols_clinical = [
"data_dispneia",
"dispneia",
"data_sofa",
"sofa_score",
"data_saturacao_o2",
"saturacao_o2",
"data_saps_3",
"saps_3"
]
df_clinical = df[df.redcap_repeat_instrument == "evolucao_clinica_multiplo"]
df_clinical = df_clinical[["record_id"] + cols_clinical]
# We need a separate dataframe for each date column. Note that the clinical dataframe has four date
# columns. We will separate the columns accordingly.
df_dispneia = df_clinical[[
"record_id",
"data_dispneia",
"dispneia"
]]
df_sofa = df_clinical[[
"record_id",
"data_sofa",
"sofa_score"
]]
df_saturacao_o2 = df_clinical[[
"record_id",
"data_saturacao_o2",
"saturacao_o2"
]]
df_saps_3 = df_clinical[[
"record_id",
"data_saps_3",
"saps_3"
]]
# Treatment dataframe
cols_treatment = [
"data_ventilacao",
"ventilacao",
"pao2_fio2",
"data_pronacao",
"pronacao",
"data_hemodialise",
"hemodialise"
]
df_treatment = df[df.redcap_repeat_instrument == "evolucao_tratamento_multiplo"]
df_treatment = df_treatment[["record_id"] + cols_treatment]
# Note that the treatment dataframe has three date columns. We will separate the columns accordingly
# just as we did for the clinical dataframe.
df_ventilacao = df_treatment[[
"record_id",
"data_ventilacao",
"ventilacao",
"pao2_fio2"
]]
df_pronacao = df_treatment[[
"record_id",
"data_pronacao",
"pronacao"
]]
df_hemodialise = df_treatment[[
"record_id" ,
"data_hemodialise",
"hemodialise"
]]
# Laboratory results dataframe
cols_laboratory = [
"leucocitos",
"linfocitos",
"neutrofilos",
"tgp",
"creatinina",
"pcr",
"d_dimero",
"il_6",
"plaquetas",
"rni",
"troponina",
"pro_bnp",
"bicarbonato",
"lactato"
]
df_laboratory = df[df.redcap_repeat_instrument == "evolucao_laboratorial_multiplo"]
df_laboratory = df_laboratory[["record_id", "data_resultados_lab"] + cols_laboratory]
# Image dataframe
cols_image = [
"uid_imagem",
"tipo_imagem",
"data_imagem",
"padrao_imagem_rsna",
"score_tc_dir_sup",
"score_tc_dir_med",
"score_tc_dir_inf",
"score_tc_esq_sup",
"score_tc_esq_med",
"score_tc_esq_inf"
]
df_image = df[df.redcap_repeat_instrument == "evolucao_imagem_multiplo"]
df_image.uid_imagem = df_image.uid_imagem.apply(lambda s: s.strip() if pd.notna(s) else s)
df_image = df_image[["record_id", "redcap_repeat_instance"] + cols_image]
df_image = pd.merge(
left=df_personal_data[["record_id", "nome", "data_nasc", "data_admissao_hospitalar", "instituicao"]],
right=df_image,
how="right",
on="record_id",
validate="one_to_many"
)
uids_internados = set(df_image[df_image.data_admissao_hospitalar.notna()].uid_imagem.unique())
# For images, we also have the data retrieved from the deep segmentation model. We need
# to enrich our dataframe with the percentages of the lungs that are healthy, affected by ground-glass
# opacity or affected by consolidation, as well as the amount of fat in the patient's body.
cols_series_id = [
"record_id",
"redcap_repeat_instance",
"infer_series_id"
]
df_series_id = pd.read_csv(SERIES_ID_FPATH, sep=";")
df_series_id = df_series_id[cols_series_id]
df_series_id = df_series_id.drop_duplicates()
cols_segmentation = [
"UID_Plataforma",
"series_id",
"seg_consolidacao",
"seg_normal",
"seg_vf1",
"seg_vf2",
"seg_vf3",
"volume_pulmao",
"taxa_gordura",
"volume_gordura",
"mediastino"
]
tmp_data = []
df_seg_raw = pd.read_csv(SEGMENTATION_FPATH)
df_seg_raw = df_seg_raw[cols_segmentation]
df_seg_raw = df_seg_raw[df_seg_raw.volume_pulmao >= 1.]
df_seg_raw = pd.merge(left=df_series_id, right=df_seg_raw, left_on="infer_series_id", right_on="series_id", how="right")
# Each TC study might have multiple series. We need to select the one with the largest lung volume,
# preferring series flagged as mediastino when available.
grouped = df_seg_raw.groupby("UID_Plataforma")
for uid_imagem, group in grouped:
if any(group.mediastino):
use_group = group[group.mediastino]
else:
use_group = group
sorted_group = use_group.sort_values("volume_pulmao")
tmp_data.append(
dict(sorted_group.iloc[-1])
)
df_seg = pd.DataFrame(tmp_data)
df_seg = df_seg[df_seg.seg_normal.notna()]
df_image = pd.merge(
left=df_image,
right=df_seg,
how="left",
on=["record_id", "redcap_repeat_instance"]
)
df_image[
["record_id", "redcap_repeat_instance", "nome", "data_nasc", "data_admissao_hospitalar", "instituicao"] + cols_image
].to_csv(os.path.join(BASE_DIR, "data", "TC_scans.csv"), index=False)
df_image = df_image.rename(columns={"redcap_repeat_instance": "redcap_repeat_instance_image"})
df_matches = df_image[
(df_image.seg_normal.notna()) & (df_image.data_admissao_hospitalar.notna())
]
df_matches[
["record_id", "data_admissao_hospitalar", "instituicao", "data_imagem", "uid_imagem"]
].to_csv(os.path.join(BASE_DIR, "data", "matches.csv"), index=False)
n_matches = df_matches.uid_imagem.nunique()
print(f"{n_matches} between REDCap and segmentation\n")
# COVID-19 confirmation dataframe
df_confirmation = df[df.redcap_repeat_instrument == "confirmacao_covid_multiplo"]
############################## Finished processing the results data ##############################
# Now we are going to create a dataframe in which each row corresponds to a moment in the patient's stay at the
# hospital. For each date in the patient history, we will update the row with the latest information about
# that patient.
# First, we need to define some helper functions to work on the processing of the data.
def get_group(grouped, key, default_columns):
"""
Gets a group by key from a Pandas Group By object. If the key does not exist, returns an empty
group with the default columns.
"""
if key in grouped.groups:
group = grouped.get_group(key)
else:
group = pd.DataFrame([], columns=default_columns)
return group
def last_register_before_date(registers, date_col, date, default_columns):
"""
Gets the last register before a reference date in a dataframe. If there are no register before the
date, returns an empty register with the default columns.
"""
registers = registers[registers[date_col].notna()]
registers_before_date = registers[
registers[date_col].apply(parser.parse) <= date
]
if len(registers_before_date) == 0:
registers_before_date = pd.DataFrame([[np.nan for col in default_columns]], columns=default_columns)
last_register = registers_before_date.iloc[-1]
return last_register
# Then, we need to group all the dataframes we built previously by patient.
grouped_dispneia = df_dispneia.groupby("record_id")
grouped_sofa = df_sofa.groupby("record_id")
grouped_saturacao_o2 = df_saturacao_o2.groupby("record_id")
grouped_saps_3 = df_saps_3.groupby("record_id")
grouped_image = df_image.groupby("record_id")
grouped_laboratory = df_laboratory.groupby("record_id")
grouped_ventilacao = df_ventilacao.groupby("record_id")
grouped_pronacao = df_pronacao.groupby("record_id")
grouped_hemodialise = df_hemodialise.groupby("record_id")
# Now we iterate over the personal data dataframe, which has one row per patient.
after_discharge = []
after_death = []
new_rows = []
for i, row in tqdm(df_personal_data.iterrows(), total=len(df_personal_data)):
record_id = row["record_id"]
institution = row["instituicao"]
hospitalization_date = row["data_admissao_hospitalar"]
discharge_date = row["data_alta_hospitalar"]
date_of_death = row["data_obito"]
if pd.notna(date_of_death):
date_of_death = parser.parse(date_of_death)
if pd.notna(discharge_date):
discharge_date = parser.parse(discharge_date)
if pd.notna(hospitalization_date):
hospitalization_date = parser.parse(hospitalization_date)
# Get each group and sort by the date
group_dispneia = get_group(
grouped_dispneia, record_id, df_dispneia.columns
).sort_values("data_dispneia")
group_sofa = get_group(
grouped_sofa, record_id, df_sofa.columns
)
group_saturacao_o2 = get_group(
grouped_saturacao_o2, record_id, df_saturacao_o2.columns
)
group_saps_3 = get_group(
grouped_saps_3, record_id, df_saps_3.columns
)
group_image = get_group(
grouped_image, record_id, df_image.columns
)
group_laboratory = get_group(
grouped_laboratory, record_id, df_laboratory.columns
)
group_ventilacao = get_group(
grouped_ventilacao, record_id, df_ventilacao.columns
)
group_pronacao = get_group(
grouped_pronacao, record_id, df_pronacao.columns
)
group_hemodialise = get_group(
grouped_hemodialise, record_id, df_hemodialise.columns
)
# List the dates available for the patient
patient_dates = set(filter(
pd.notna,
list(group_dispneia.data_dispneia) +
list(group_sofa.data_sofa) +
list(group_saturacao_o2.data_saturacao_o2) +
list(group_saps_3.data_saps_3) +
list(group_image.data_imagem) +
list(group_laboratory.data_resultados_lab) +
list(group_ventilacao.data_ventilacao) +
list(group_pronacao.data_pronacao) +
list(group_hemodialise.data_hemodialise)
))
patient_dates = funcy.lmap(parser.parse, patient_dates)
# Now we iterate over the dates of the patient retrieving the last register for
# each group.
new_patient_rows = []
for date_tmp in patient_dates:
# If the date is after the patient's death or the patient's discharge, we want to ignore
# the register.
if abs(date_tmp.year - dataset_date.year) > 0:
continue
if pd.notna(date_of_death) and date_tmp > date_of_death:
after_death.append(record_id)
continue
if pd.notna(discharge_date) and date_tmp > discharge_date:
after_discharge.append(discharge_date)
continue
last_register_dispneia = last_register_before_date(group_dispneia, "data_dispneia", date_tmp, df_dispneia.columns)
last_register_sofa = last_register_before_date(group_sofa, "data_sofa", date_tmp, df_sofa.columns)
last_register_saturacao_o2 = last_register_before_date(group_saturacao_o2, "data_saturacao_o2", date_tmp, df_saturacao_o2.columns)
last_register_saps_3 = last_register_before_date(group_saps_3, "data_saps_3", date_tmp, df_saps_3.columns)
last_register_image = last_register_before_date(group_image, "data_imagem", date_tmp, df_image.columns)
last_register_laboratory = last_register_before_date(group_laboratory, "data_resultados_lab", date_tmp, df_laboratory.columns)
last_register_pronacao = last_register_before_date(group_pronacao, "data_pronacao", date_tmp, df_pronacao.columns)
last_register_hemodialise = last_register_before_date(group_hemodialise, "data_hemodialise", date_tmp, df_hemodialise.columns)
# Need for mechanical ventilation is one of our target variables. Thus, we do not want to get the last register before the
# current date. We want to know if the patient ever needed mechanical ventilation at any point in time.
ventilacao = group_ventilacao[group_ventilacao.ventilacao == group_ventilacao.ventilacao.max()].sort_values("data_ventilacao", ascending=False)
if len(ventilacao) == 0:
ventilacao = pd.DataFrame([[np.nan for col in group_ventilacao.columns]], columns=group_ventilacao.columns)
ventilacao = ventilacao.iloc[-1]
new_row = {}
new_row.update(row)
new_row.update(dict(last_register_dispneia))
new_row.update(dict(last_register_sofa))
new_row.update(dict(last_register_saturacao_o2))
new_row.update(dict(last_register_saps_3))
new_row.update(dict(last_register_image))
new_row.update(dict(last_register_laboratory))
new_row.update(dict(last_register_pronacao))
new_row.update(dict(last_register_hemodialise))
new_row.update(dict(ventilacao))
new_row["data"] = date_tmp
new_row["record_id"] = record_id
new_row["instituicao"] = institution
new_row["dias_desde_admissao"] = (date_tmp - hospitalization_date).days if pd.notna(hospitalization_date) else np.nan
date_of_outcome = date_of_death if pd.notna(date_of_death) else discharge_date
new_row["dias_antes_desfecho"] = (date_of_outcome - date_tmp).days if pd.notna(date_of_outcome) else np.nan
new_patient_rows.append(new_row)
new_rows.extend(new_patient_rows)
df_final = pd.DataFrame(new_rows)
# We need to calculate some dummy variables for the categorical data.
padrao_rsna_dummies = pd.get_dummies(df_final.padrao_imagem_rsna, prefix="padrao_rsna")
ventilacao_dummies = pd.get_dummies(df_final.ventilacao, prefix="ventilacao")
neoplasia_dummies = pd.get_dummies(df_final.neoplasia, prefix="neoplasia")
irc_dummies = pd.get_dummies(df_final.irc, prefix="irc")
sexo_dummies = pd.get_dummies(df_final.sexo, prefix="sexo")
df_final = pd.concat([df_final,
padrao_rsna_dummies,
ventilacao_dummies,
neoplasia_dummies,
irc_dummies,
sexo_dummies], axis=1)
def calc_ventilation(row):
if pd.isna(row["ventilacao"]):
# -*- coding: utf-8 -*-
from datetime import timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.lib import Timestamp
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')], tz='US/Eastern', name='idx')
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
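# Minimal sketch of the PCA workflow described above (standardise, fit, project).
# It mirrors what the callbacks further down do, but is illustrative only and is
# not wired into the app; `dff` is assumed to be a numeric-only DataFrame.
def _example_run_pca(dff):
    x = StandardScaler().fit_transform(dff.values)
    pca = PCA(n_components=dff.shape[1])
    scores = pca.fit_transform(x)
    return pd.DataFrame(scores, columns=['PC' + str(i + 1) for i in range(dff.shape[1])])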
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
'their dataset and whether they can accept an X% drop in explained variance to '
'have fewer dimensions.', width=50)
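# Illustrative helper (not used by the app): the cumulative explained variance
# curve shown in the Scree plot tab, as a percentage, for a numeric DataFrame.
def _example_cumulative_explained_variance(dff):
    x = StandardScaler().fit_transform(dff.values)
    pca = PCA(n_components=dff.shape[1]).fit(x)
    return np.cumsum(pca.explained_variance_ratio_) * 100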
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
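# Illustrative helper (not used by the app): loadings are the correlations
# between the original variables and the principal components, computed here as
# the components scaled by the square root of their eigenvalues.
def _example_loadings(dff):
    x = StandardScaler().fit_transform(dff.values)
    pca = PCA(n_components=dff.shape[1]).fit(x)
    loading = pca.components_.T * np.sqrt(pca.explained_variance_)
    return pd.DataFrame(loading, index=dff.columns,
                        columns=['PC' + str(i + 1) for i in range(loading.shape[1])])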
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
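# Illustrative helper (not used by the app): cos2 is the squared loading, and a
# variable's contribution (%) to a component is its cos2 share of that
# component's total cos2 -- the quantities behind the Cos2 and Contribution tabs.
def _example_cos2_and_contrib(loading_df):
    cos2 = loading_df ** 2
    contrib = cos2.div(cos2.sum(axis=0), axis=1) * 100
    return cos2, contrib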
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
df = df.fillna(0)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
df = df.fillna(0)
elif 'txt' in filename or 'tsv' in filename:
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_eigen_plot(outlier, matrix_type, data):
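    """Eigenvalue scree plot: re-run PCA for the selected outlier and matrix-type
    settings and plot the eigenvalue of each principal component."""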
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
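# Rounding helpers used to compute the displayed colour range of the loading heatmap below.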
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
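# Note: the callbacks in this file repeat the same inline z-score outlier filter.
# The helper below is an illustrative sketch of that shared pattern only; it is not
# wired into the callbacks, and it assumes numpy (np) and scipy are already imported
# at module level, as the surrounding code does.
def filter_outliers_zscore(numeric_df, threshold=3):
    # Illustrative helper: keep only the rows whose values all lie within `threshold`
    # standard deviations of their column mean (the same rule applied inline below).
    abs_z = np.abs(scipy.stats.zscore(numeric_df))
    return numeric_df[(abs_z < threshold).all(axis=1)]


# Callback: heatmap of loadings (features vs. principal components) for the selected
# outlier and matrix-type settings, plus the text showing the displayed colour range.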
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_pc_feature_heatmap(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
    # explained variance of the principal components
    # print(pca.explained_variance_ratio_)
    # Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loading of each feature on the principal components: loadings = eigenvectors * sqrt(eigenvalues)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=features_outlier2, y=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])],
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell: loading of a feature on a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
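# Callback: feature-vs-feature heatmap of R² (squared Pearson correlation), with or
# without z-score outlier removal.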
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_feature_heatmap(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell: coefficient of determination (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
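# Callback: populate the variable-selection dropdown; options are only offered when
# 'Custom' variables are chosen.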
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
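# Callback: populate the colour-scale dropdown for the biplot scores with the
# custom-selected variables (rows are outlier-filtered when outlier removal is on).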
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
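# Callback: populate the size-scale dropdown for the biplot scores, mirroring the
# colour-scale dropdown above.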
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_size_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
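# Callback: biplot / loadings plot. Runs PCA on all or custom-selected variables for the
# chosen outlier and matrix-type settings, plots the scores (PC1 vs. PC2) and draws each
# loading as a line segment from the origin (built by concatenating the loading
# coordinates with a matching block of zeros and grouping the lines per feature).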
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
        # PCA on standardized data (equivalent to a correlation-matrix PCA)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
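# Callback: display the [min, max] range of the variable selected for marker size
# (rows are outlier-filtered when outlier removal is on).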
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_size_range_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
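# Callback: cos2 plot. Draws the loading vectors coloured by cos2 = PC1 loading² + PC2 loading²,
# i.e. the quality of representation of each feature on the first two principal components.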
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
        # PCA on standardized data (equivalent to a correlation-matrix PCA)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
        # calculating loadings for the loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
        # calculating loadings for the loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
# scaling data
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_contrib_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
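# a row is kept only when every feature's absolute z-score is below 3 (a common rule-of-thumb outlier filter)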
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
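# standardizing to zero mean and unit variance makes this equivalent to PCA of the correlation matrix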
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
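# contribution of variable j to component k, as computed below:
#   contrib_{j,k} = 100 * loading_{j,k}**2 / sum_i loading_{i,k}**2
# 'contrib' then sums the PC1 and PC2 contributions for the colour scale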
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
# keep PC1, PC2 and the summed contribution (used for the colour scale) in a separate dataframe
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH OUTLIERS REMOVED
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
# keep PC1, PC2 and the summed contribution (used for the colour scale) in a separate dataframe
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["PC1_cos2"] = loading_outlier_scale_df_covar["PC1"] ** 2
loading_outlier_scale_df_covar["PC2_cos2"] = loading_outlier_scale_df_covar["PC2"] ** 2
loading_outlier_scale_df_covar["PC1_contrib"] = \
(loading_outlier_scale_df_covar["PC1_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["PC2_contrib"] = \
(loading_outlier_scale_df_covar["PC2_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["contrib"] = loading_outlier_scale_df_covar["PC1_contrib"] + \
loading_outlier_scale_df_covar[
"PC2_contrib"]
# keep PC1, PC2 and the summed contribution (used for the colour scale) in a separate dataframe
loading_outlier_scale_dataf_covar = pd.concat(
[loading_outlier_scale_df_covar.iloc[:, 0:2], loading_outlier_scale_df_covar.iloc[:, 6]], axis=1)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_dataf_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_outlier_scale_dff_covar = pd.concat(
[zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='contrib')
# select the loading line data and explained variance for the chosen outlier / matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0),
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["PC1_cos2"] = loading_scale_input_df["PC1"] ** 2
loading_scale_input_df["PC2_cos2"] = loading_scale_input_df["PC2"] ** 2
loading_scale_input_df["PC1_contrib"] = \
(loading_scale_input_df["PC1_cos2"] * 100) / (loading_scale_input_df["PC1_cos2"].sum(axis=0))
loading_scale_input_df["PC2_contrib"] = \
(loading_scale_input_df["PC2_cos2"] * 100) / (loading_scale_input_df["PC2_cos2"].sum(axis=0))
loading_scale_input_df["contrib"] = loading_scale_input_df["PC1_contrib"] + loading_scale_input_df[
"PC2_contrib"]
loading_scale_input_dataf = pd.concat(
[loading_scale_input_df.iloc[:, 0:2], loading_scale_input_df.iloc[:, 6]], axis=1)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_dataf, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["PC1_cos2"] = loading_scale_input_outlier_df["PC1"] ** 2
loading_scale_input_outlier_df["PC2_cos2"] = loading_scale_input_outlier_df["PC2"] ** 2
loading_scale_input_outlier_df["PC1_contrib"] = \
(loading_scale_input_outlier_df["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df["PC2_contrib"] = \
(loading_scale_input_outlier_df["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df["contrib"] = loading_scale_input_outlier_df["PC1_contrib"] + \
loading_scale_input_outlier_df[
"PC2_contrib"]
loading_scale_input_outlier_dataf = pd.concat(
[loading_scale_input_outlier_df.iloc[:, 0:2], loading_scale_input_outlier_df.iloc[:, 6]], axis=1)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat(
[loading_scale_input_outlier_dataf, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_dataf.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='contrib')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["PC1_cos2"] = loading_scale_input_df_covar["PC1"] ** 2
loading_scale_input_df_covar["PC2_cos2"] = loading_scale_input_df_covar["PC2"] ** 2
loading_scale_input_df_covar["PC1_contrib"] = \
(loading_scale_input_df_covar["PC1_cos2"] * 100) / (loading_scale_input_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_df_covar["PC2_contrib"] = \
(loading_scale_input_df_covar["PC2_cos2"] * 100) / (loading_scale_input_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_df_covar["contrib"] = loading_scale_input_df_covar["PC1_contrib"] + \
loading_scale_input_df_covar[
"PC2_contrib"]
loading_scale_input_dataf_covar = pd.concat(
[loading_scale_input_df_covar.iloc[:, 0:2], loading_scale_input_df_covar.iloc[:, 6]], axis=1)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_dataf_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["PC1_cos2"] = loading_scale_input_outlier_df_covar["PC1"] ** 2
loading_scale_input_outlier_df_covar["PC2_cos2"] = loading_scale_input_outlier_df_covar["PC2"] ** 2
loading_scale_input_outlier_df_covar["PC1_contrib"] = \
(loading_scale_input_outlier_df_covar["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["PC2_contrib"] = \
(loading_scale_input_outlier_df_covar["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["contrib"] = loading_scale_input_outlier_df_covar["PC1_contrib"] + \
loading_scale_input_outlier_df_covar[
"PC2_contrib"]
loading_scale_input_outlier_dataf_covar = pd.concat(
[loading_scale_input_outlier_df_covar.iloc[:, 0:2], loading_scale_input_outlier_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat(
[loading_scale_input_outlier_dataf_covar, line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff_covar = pd.concat(
[zero_scale_input_outlier_df_covar, zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='contrib')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_sort_covar
variance = Var_scale_input_outlier_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0)
))
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('download-link', 'download'),
[Input('all-custom-choice', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(all_custom, outlier, matrix_type):
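    # map the All/Custom, outlier and matrix-type selections to a descriptive file name for the downloaded CSV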
if all_custom == 'All' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_data.csv'
return download
@app.callback(Output('download-link', 'href'),
[Input('all-custom-choice', 'value'),
Input('feature-input', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')])
def update_link(all_custom, input, outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
# ORIGINAL DATA WITH OUTLIERS REMOVED
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
# COVARIANCE MATRIX OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
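    # serve the selected score table as a data: URI; the %EF%BB%BF prefix is the URL-encoded UTF-8 byte-order mark,
    # commonly added so spreadsheet software detects the encoding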
csv_string = dat.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return csv_string
@app.callback(Output('download-link-correlation', 'download'),
[Input('eigenA-outlier', 'value'),
])
def update_filename(outlier):
if outlier == 'Yes':
download = 'feature_correlation_removed_outliers_data.csv'
elif outlier == 'No':
download = 'feature_correlation_data.csv'
return download
@app.callback([Output('data-table-correlation', 'data'),
Output('data-table-correlation', 'columns'),
Output('download-link-correlation', 'href')],
[Input("eigenA-outlier", 'value'),
Input('csv-data', 'data')], )
def update_output(outlier, data):
if not data:
return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff_table = correlation_dff * correlation_dff
r2_dff_table.insert(0, 'Features', features)
data_frame = r2_dff_table
if outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier_table = correlation_dff_outlier * correlation_dff_outlier
r2_dff_outlier_table.insert(0, 'Features', features_outlier)
data_frame = r2_dff_outlier_table
data = data_frame.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame.columns]
csv_string = data_frame.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-eigenA', 'download'),
[Input("matrix-type-data-table", 'value'),
Input('eigenA-outlier', 'value')])
def update_filename(matrix_type, outlier):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_data.csv'
elif outlier == "No" and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-eigenA', 'data'),
Output('data-table-eigenA', 'columns'),
Output('download-link-eigenA', 'href')],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
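# np.interp estimates how many principal components are needed to reach 70% cumulative explained variance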
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
Var_dfff = pd.concat([(Var_cumsum * 100)], axis=1)
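# assemble a transposed summary table (one column per PC) holding the eigenvalues, the proportion of
# explained variance and the cumulative explained variance (%)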
Eigen_Analysis = pd.concat([PC_df.T, Eigen_df.T, Var_df.T, Var_dfff.T], axis=0)
Eigen_Analysis = Eigen_Analysis.rename(columns=Eigen_Analysis.iloc[0])
Eigen_Analysis = Eigen_Analysis.drop(Eigen_Analysis.index[0])
Eigen_Analysis.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
Var_dfff_outlier = pd.concat([Var_cumsum_outlier * 100], axis=1)
Eigen_Analysis_Outlier = pd.concat(
[PC_df_outlier.T, Eigen_df_outlier.T, Var_df_outlier.T, Var_dfff_outlier.T],
axis=0)
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.rename(columns=Eigen_Analysis_Outlier.iloc[0])
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.drop(Eigen_Analysis_Outlier.index[0])
Eigen_Analysis_Outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier
elif outlier == "No" and matrix_type == "Covariance":
features1 = dff.columns
features = list(features1)
x_covar = dff.loc[:, features].values
pca_covar = PCA(n_components=len(features))
principalComponents_covar = pca_covar.fit_transform(x_covar)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
dfff_covar = finalDf_covar
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
Var_dfff_covar = pd.concat([(Var_cumsum_covar * 100)], axis=1)
Eigen_Analysis_covar = pd.concat([PC_df_covar.T, Eigen_df_covar.T, Var_df_covar.T, Var_dfff_covar.T],
axis=0)
Eigen_Analysis_covar = Eigen_Analysis_covar.rename(columns=Eigen_Analysis_covar.iloc[0])
Eigen_Analysis_covar = Eigen_Analysis_covar.drop(Eigen_Analysis_covar.index[0])
Eigen_Analysis_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier_covar = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier_covar = outlier_dff.loc[:, ].values
pca_outlier_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier_covar)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
,
columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
dfff_outlier_covar = finalDf_outlier_covar
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier_covar = np.interp(70,
Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier_covar = math.ceil(PC_interp_outlier_covar)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
Var_dfff_outlier_covar = pd.concat([Var_cumsum_outlier_covar * 100], axis=1)
Eigen_Analysis_Outlier_covar = pd.concat(
[PC_df_outlier_covar.T, Eigen_df_outlier_covar.T, Var_df_outlier_covar.T, Var_dfff_outlier_covar.T],
axis=0)
Eigen_Analysis_Outlier_covar = Eigen_Analysis_Outlier_covar.rename(
columns=Eigen_Analysis_Outlier_covar.iloc[0])
Eigen_Analysis_Outlier_covar = Eigen_Analysis_Outlier_covar.drop(Eigen_Analysis_Outlier_covar.index[0])
Eigen_Analysis_Outlier_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier_covar
elif all_custom == "Custom":
if outlier == 'No' and matrix_type == "Correlation":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# INPUT DATA WITH OUTLIERS
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
eigenvalues_scale_input = pca_scale_input.explained_variance_
            Eigen_df_scale_input = pd.DataFrame(data=eigenvalues_scale_input, columns=["Eigenvalues"])
PC_df_scale_input = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_input))],
columns=['Principal Component'])
Var_df_scale_input = pd.DataFrame(data=Var_scale_input,
columns=['Cumulative Proportion of Explained Ratio'])
Var_cumsum_scale_input = Var_df_scale_input.cumsum()
Var_dfff_scale_input = pd.concat([Var_cumsum_scale_input * 100], axis=1)
Eigen_Analysis_scale_input = pd.concat([PC_df_scale_input.T, Eigen_df_scale_input.T,
Var_df_scale_input.T, Var_dfff_scale_input.T], axis=0)
Eigen_Analysis_scale_input = Eigen_Analysis_scale_input.rename(columns=Eigen_Analysis_scale_input.iloc[0])
Eigen_Analysis_scale_input = Eigen_Analysis_scale_input.drop(Eigen_Analysis_scale_input.index[0])
Eigen_Analysis_scale_input.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input
elif outlier == "Yes" and matrix_type == "Correlation":
dff_input = dff.drop(columns=dff[input])
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
dff_target = dff[input]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# INPUT DATA WITH REMOVING OUTLIERS
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
eigenvalues_scale_input_outlier = pca_scale_input_outlier.explained_variance_
            Eigen_df_scale_input_outlier = pd.DataFrame(data=eigenvalues_scale_input_outlier, columns=["Eigenvalues"])
PC_df_scale_input_outlier = pd.DataFrame(
data=['PC' + str(i + 1) for i in range(len(features_input_outlier))],
columns=['Principal Component'])
Var_df_scale_input_outlier = pd.DataFrame(data=Var_scale_input_outlier,
columns=['Cumulative Proportion of Explained '
'Ratio'])
Var_cumsum_scale_input_outlier = Var_df_scale_input_outlier.cumsum()
Var_dfff_scale_input_outlier = pd.concat([Var_cumsum_scale_input_outlier * 100], axis=1)
Eigen_Analysis_scale_input_outlier = pd.concat([PC_df_scale_input_outlier.T, Eigen_df_scale_input_outlier.T,
Var_df_scale_input_outlier.T,
Var_dfff_scale_input_outlier.T], axis=0)
Eigen_Analysis_scale_input_outlier = Eigen_Analysis_scale_input_outlier.rename(
columns=Eigen_Analysis_scale_input_outlier.iloc[0])
Eigen_Analysis_scale_input_outlier = Eigen_Analysis_scale_input_outlier.drop(
Eigen_Analysis_scale_input_outlier.index[0])
Eigen_Analysis_scale_input_outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
x_scale_input_covar = dff_input.loc[:, features_input].values
# INPUT DATA WITH OUTLIERS
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target],
axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
eigenvalues_scale_input_covar = pca_scale_input_covar.explained_variance_
            Eigen_df_scale_input_covar = pd.DataFrame(data=eigenvalues_scale_input_covar, columns=["Eigenvalues"])
PC_df_scale_input_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_input))],
columns=['Principal Component'])
Var_df_scale_input_covar = pd.DataFrame(data=Var_scale_input_covar,
columns=['Cumulative Proportion of Explained Ratio'])
Var_cumsum_scale_input_covar = Var_df_scale_input_covar.cumsum()
Var_dfff_scale_input_covar = pd.concat([Var_cumsum_scale_input_covar * 100], axis=1)
Eigen_Analysis_scale_input_covar = pd.concat([PC_df_scale_input_covar.T, Eigen_df_scale_input_covar.T,
Var_df_scale_input_covar.T, Var_dfff_scale_input_covar.T],
axis=0)
Eigen_Analysis_scale_input_covar = Eigen_Analysis_scale_input_covar.rename(
columns=Eigen_Analysis_scale_input_covar.iloc[0])
Eigen_Analysis_scale_input_covar = Eigen_Analysis_scale_input_covar.drop(
Eigen_Analysis_scale_input_covar.index[0])
Eigen_Analysis_scale_input_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
dff_target = dff[input]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
# INPUT DATA WITH REMOVING OUTLIERS
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
eigenvalues_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_
            Eigen_df_scale_input_outlier_covar = pd.DataFrame(data=eigenvalues_scale_input_outlier_covar,
                                                              columns=["Eigenvalues"])
PC_df_scale_input_outlier_covar = pd.DataFrame(
data=['PC' + str(i + 1) for i in range(len(features_input_outlier))],
columns=['Principal Component'])
Var_df_scale_input_outlier_covar = pd.DataFrame(data=Var_scale_input_outlier_covar,
columns=['Cumulative Proportion of Explained '
'Ratio'])
Var_cumsum_scale_input_outlier_covar = Var_df_scale_input_outlier_covar.cumsum()
Var_dfff_scale_input_outlier_covar = pd.concat([Var_cumsum_scale_input_outlier_covar * 100], axis=1)
Eigen_Analysis_scale_input_outlier_covar = pd.concat(
[PC_df_scale_input_outlier_covar.T, Eigen_df_scale_input_outlier_covar.T,
Var_df_scale_input_outlier_covar.T,
Var_dfff_scale_input_outlier_covar.T], axis=0)
Eigen_Analysis_scale_input_outlier_covar = Eigen_Analysis_scale_input_outlier_covar.rename(
columns=Eigen_Analysis_scale_input_outlier_covar.iloc[0])
Eigen_Analysis_scale_input_outlier_covar = Eigen_Analysis_scale_input_outlier_covar.drop(
Eigen_Analysis_scale_input_outlier_covar.index[0])
Eigen_Analysis_scale_input_outlier_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_outlier_covar
data = data_frame_EigenA.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame_EigenA.columns]
csv_string = data_frame_EigenA.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-loadings', 'download'),
[Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(outlier, matrix_type):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Loadings_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Loadings_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Loadings_correlation_matrix_data.csv'
elif outlier == 'No' and matrix_type == "Covariance":
download = 'Loadings_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-loadings', 'data'),
Output('data-table-loadings', 'columns'),
Output('download-link-loadings', 'href')],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
        return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale,
columns=["PC" + str(i + 1) for i in range(len(features))])
line_group_scale_df = pd.DataFrame(data=features, columns=['Features'])
loading_scale_dataf = pd.concat([line_group_scale_df, loading_scale_df], axis=1)
data_frame = loading_scale_dataf
elif outlier == 'Yes' and matrix_type == "Correlation":
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
,
columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale,
columns=["PC" + str(i + 1) for i in range(len(features_outlier))])
line_group_outlier_scale_df = pd.DataFrame(data=features_outlier, columns=['Features'])
loading_outlier_scale_dataf = pd.concat([line_group_outlier_scale_df, loading_outlier_scale_df], axis=1)
data_frame = loading_outlier_scale_dataf
elif outlier == "No" and matrix_type == "Covariance":
features1 = dff.columns
features = list(features1)
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar,
columns=["PC" + str(i + 1) for i in range(len(features))])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['Features'])
loading_scale_dataf_covar = pd.concat([line_group_scale_df_covar, loading_scale_df_covar], axis=1)
data_frame = loading_scale_dataf_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
# uses covariance matrix
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar,
columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
            # combining principal components and target
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar,
columns=["PC" + str(i + 1) for i in
range(len(features_outlier))])
line_group_outlier_scale_df_covar = pd.DataFrame(data=features_outlier, columns=['Features'])
loading_outlier_scale_dataf_covar = pd.concat(
[line_group_outlier_scale_df_covar, loading_outlier_scale_df_covar], axis=1)
data_frame = loading_outlier_scale_dataf_covar
if all_custom == 'Custom':
if outlier == 'No' and matrix_type == "Correlation":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input,
columns=["PC" + str(i + 1) for i in range(len(features_input))])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['Features'])
loading_scale_input_dataf = pd.concat([line_group_scale_input_df, loading_scale_input_df], axis=1)
data_frame = loading_scale_input_dataf
elif outlier == 'Yes' and matrix_type == "Correlation":
dff_input = dff.drop(columns=dff[input])
dff_target = dff[input]
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier,
columns=["PC" + str(i + 1)
for i in range(len(features_input_outlier))])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['Features'])
loading_scale_input_outlier_dataf = pd.concat([line_group_scale_input_outlier_df,
loading_scale_input_outlier_df], axis=1)
data_frame = loading_scale_input_outlier_dataf
elif outlier == "No" and matrix_type == "Covariance":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# INPUT DATA WITH OUTLIERS
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target],
axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar,
columns=["PC" + str(i + 1) for i in range(len(features_input))])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['Features'])
loading_scale_input_dataf_covar = pd.concat([line_group_scale_input_df_covar, loading_scale_input_df_covar],
axis=1)
data_frame = loading_scale_input_dataf_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
dff_target = dff[input]
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar,
columns=["PC" + str(i + 1)
for i in range(len(features_input_outlier))])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['Features'])
loading_scale_input_outlier_dataf_covar = pd.concat([line_group_scale_input_outlier_df_covar,
loading_scale_input_outlier_df_covar], axis=1)
data_frame = loading_scale_input_outlier_dataf_covar
data = data_frame.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame.columns]
csv_string = data_frame.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
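

# Note on the loadings tables above: loadings are computed as the eigenvectors scaled by the
# square roots of their eigenvalues (components_.T * np.sqrt(explained_variance_)), so for a
# correlation-matrix PCA they correspond to the correlations between the standardised
# variables and the principal components.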
@app.callback(Output('download-link-cos2', 'download'),
[Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(outlier, matrix_type):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Cos2_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Cos2_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Cos2_correlation_matrix_data.csv'
elif outlier == "No" and matrix_type == "Covariance":
download = 'Cos2_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-cos2', 'data'),
Output('data-table-cos2', 'columns'),
Output('download-link-cos2', 'href'), ],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
        return dash.no_update, dash.no_update, dash.no_update
    df = pd.read_json(data, orient='split')
#! -*- coding: utf-8 -*-
#---------------------------------
# Import modules
#---------------------------------
import os
import pandas as pd
#---------------------------------
# Constant definitions
#---------------------------------
#---------------------------------
# Functions
#---------------------------------
def null_count(train_data, test_data):
print('<< null count: train_data >>')
    print(pd.isnull(train_data))
"""
Output/model results file interfaces.
All defined classes are attached to project and run instances as
propertyplugin that return a pandas.DataFrame. For files to be read from the
SWIM project, a from_project method needs to be defined. To read the data from
a run instance, a method refering to the extension of a file saved as
run file needs to be defined (e.g. from_csv) or from_run to overwrite the
file selection.
Conventions
-----------
- class and method names should be lowercase, words separated by _ and
descriptions should be singular (subbasin rather than subbasins)
- name word order: spatial domain (catchment, subbasin, hydrotope, station
etc.), timestep adjective (daily, monthly, annually, average), variable
and/or other descriptions. Pattern: domain_timestep_variable[_description...]
- all read ``from_*`` methods should parse any keyword to the pandas.read call
"""
import os.path as osp
from glob import glob
import datetime as dt
import calendar
import warnings
import inspect
import numpy as np
import pandas as pd
from modelmanager.utils import propertyplugin
from modelmanager.plugins.pandas import ProjectOrRunData
from swimpy import utils, plot, hydro
from swimpy.plot import plot_function as _plot_function
from swimpy.grass import _subbasin_or_hydrotope_values_to_raster
RESDIR = 'output/Res'
GISDIR = 'output/GIS'
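

# Illustrative sketch only (not an actual SWIM output file): a minimal interface class
# following the conventions described in the module docstring above. The class name, file
# path and read arguments are assumptions chosen for demonstration.
class _example_subbasin_daily_variable(ProjectOrRunData):
    path = osp.join(RESDIR, 'example_variable.csv')

    @staticmethod
    def from_project(path, **readkwargs):
        # parse any keyword to the pandas.read call (module convention)
        return pd.read_csv(path, index_col=0, **readkwargs)

    @staticmethod
    def from_csv(path, **readkwargs):
        return pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
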
class station_daily_discharge(ProjectOrRunData):
"""
Daily discharge of selected stations.
"""
path = osp.join(RESDIR, 'Q_gauges_sel_sub_routed_m3s.csv')
plugin = ['plot', 'plot_regime', 'plot_flow_duration_polar']
@staticmethod
def from_project(path, **readkwargs):
readkwargs.setdefault("skipinitialspace", True)
df = pd.read_csv(path, **readkwargs)
dtms = [dt.date(y, 1, 1) + dt.timedelta(d - 1)
for y, d in zip(df.pop('YEAR'), df.pop('DAY'))]
df.index = pd.PeriodIndex(dtms, freq='d', name='time')
return df
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
df.index = df.index.to_period(freq='d')
return df
def _default_stations(self, stations=None):
if stations is None:
dstations = self.columns[1:] # first column is observed
else:
assert type(stations) == str or len(stations) > 0
dstations = [stations] if type(stations) == str else stations
return dstations
@_plot_function
def plot(self, stations=None, freq='d', minmax=False,
observed=False, ax=None, runs=None, output=None, **linekw):
"""Line plot of daily discharge of selected stations.
Arguments
---------
stations : None | str | iterable
Only show single (str) or subset (iterable) of stations. If None,
show all found in file.
freq : <pandas frequency>
Any pandas frequency to aggregate to.
observed : bool
Add line for observed discharge. stations.daily_discharge_observed
must be configured.
**linekw :
Parse any keyword to the line plot function.
"""
stations = self._default_stations(stations)
data = utils.aggregate_time(self[stations], freq=freq)
# plot observed
if observed:
obs = utils.aggregate_time(
(self.project.stations.daily_discharge_observed
.loc[self.index, stations]), freq=freq)
clrs = plot.default_colors(len(stations), linekw.get('colors', []))
for c, s in zip(clrs, stations):
plot.plot_discharge(obs[s], ax, linestyle='--', color=c)
for s in stations:
# add label if multiple runs
if runs and len(runs[0]) > 1:
qs, i = runs
lab = '%s ' % qs[i] + ('' if len(stations) == 1 else str(s))
linekw['label'] = lab
line = plot.plot_discharge(data[s], ax, **linekw)
return line
@_plot_function
def plot_regime(self, stations=None, freq='d', minmax=False,
observed=False, ax=None, runs=None, output=None, **linekw):
"""Line plot of daily discharge of selected stations.
Arguments
---------
stations : None | str | iterable
Only show single (str) or subset (iterable) of stations. If None,
show all found in file.
freq : str
Regime frequency, d (daily) or m (monthly).
minmax : bool | dict
Show min-max range. May be a dictionary kwargs
parsed to ax.fill_between.
observed : bool
Add line for observed discharge. stations.daily_discharge_observed
must be configured.
**linekw :
Parse any keyword to the line plot function.
"""
stations = self._default_stations(stations)
data = {}
for st in ['mean'] + (['min', 'max'] if minmax else []):
data[st] = utils.aggregate_time(self[stations], regime=True,
freq=freq, regime_method=st)
# show range first if required
if minmax:
for s in stations:
fbkw = minmax if type(minmax) == dict else {}
fbkw.setdefault("alpha", 0.5)
ax.fill_between(data['min'][s].index, data['max'][s], **fbkw)
# plot observed
if observed:
obs = utils.aggregate_time(
(self.project.stations.daily_discharge_observed
.loc[self.index, stations]), regime=True, freq=freq)
clrs = plot.default_colors(len(stations), linekw.get('colors', []))
for c, s in zip(clrs, stations):
plot.plot_discharge(obs[s], ax, linestyle='--', color=c)
for s in stations:
# add label if multiple runs
if runs and len(runs[0]) > 1:
qs, i = runs
lab = '%s ' % qs[i] + ('' if len(stations) == 1 else str(s))
linekw['label'] = lab
line = plot.plot_discharge(data['mean'][s], ax, **linekw)
xlabs = {'d': 'Day of year', 'm': 'Month'}
ax.set_xlabel(xlabs[freq])
if freq == 'm':
ax.set_xticklabels([s[0] for s in calendar.month_abbr[1:]])
ax.set_xticks(range(1, 12+1))
ax.set_xlim(1, 12)
elif freq == 'd':
nd = np.array(calendar.mdays).cumsum()
nd[:-1] += 1
ax.set_xticks(nd)
ax.set_xlim(1, 365)
return line
@_plot_function
def plot_flow_duration(self, stations=None, ax=None, runs=None,
output=None, **linekw):
stations = self._default_stations(stations)
lines = []
for s in stations:
fd = hydro.flow_duration(self[s])
line = plot.plot_flow_duration(fd, ax=ax, **linekw)
lines.append(line)
        return lines
@_plot_function
def plot_flow_duration_polar(self, station, percentilestep=10, freq='m',
colormap='jet_r', ax=None, runs=None,
output=None, **barkw):
"""Plot flow duration on a wheel of month or days of year.
Arguments
---------
station : str
A single station label (not possible for multiple stations).
percentilestep : % <= 50
Intervals of flow duration of 100%.
freq : 'm' | 'd'
Duration per month or day of year.
colormap : str
Matplotlib to use for the colour shading.
"""
if runs:
assert len(runs[0]) == 1
ax = plot.plot_flow_duration_polar(self[station], freq=freq, ax=ax,
percentilestep=percentilestep,
colormap=colormap, **barkw)
return ax
def peak_over_threshold(self, percentile=1, threshold=None, maxgap=None,
stations=None):
"""Identify peaks over a threshold, return max, length, date and recurrence.
Arguments
---------
percentile : number
            The percentile threshold of Q, e.g. 1 means Q1.
threshold : number, optional
Absolute threshold to use for peak identification.
maxgap : int, optional
Largest gap between two threshold exceedance periods to count as
single flood event. Number of timesteps. If not given, every
exceedance is counted as individual flood event.
stations : stationID | list of stationIDs
Return subset of stations. Default all.
Returns
-------
pd.DataFrame :
Peak discharge ordered dataframe with order index and peak q,
length, peak date and recurrence columns with MultiIndex if more
than one station is selected.
"""
stations = self._default_stations(stations)
kw = dict(percentile=percentile, threshold=threshold, maxgap=maxgap)
pot = [hydro.peak_over_threshold(self[s], **kw) for s in stations]
return pot[0] if len(stations) == 1 else pd.concat(pot, keys=stations)
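    # Usage sketch (the station label shown is an assumption for illustration):
    #   pot = project.station_daily_discharge.peak_over_threshold(percentile=1, maxgap=3,
    #                                                              stations='hof')
    #   pot.head()   # largest peaks with their length, date and recurrence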
def obs_sim_overlap(self, warmupyears=1):
"""Return overlapping obs and sim dataframes excluding warmup period.
Arguments
---------
warmupyears : int
Number of years to skip at beginng as warm up period.
Returns
-------
(pd.DataFrame, pd.DataFrame) : observed and simulated discharge.
"""
obs = self.project.stations.daily_discharge_observed
# exclude warmup period
sim = self[str(self.index[0].year+warmupyears):]
obsa, sima = obs.align(sim, join='inner')
# obsa can still have columns with only NAs
obsa.dropna(how='all', axis=1, inplace=True)
return obsa, sima[obsa.columns]
@property
def NSE(self):
"""pandas.Series of Nash-Sutcliff efficiency excluding warmup year."""
obs, sim = self.obs_sim_overlap()
return pd.Series({s: hydro.NSE(obs[s], sim[s]) for s in obs.columns})
@property
def rNSE(self):
"""pandas.Series of reverse Nash-Sutcliff efficiency (best = 0)"""
return 1 - self.NSE
@property
def pbias(self):
"""pandas.Series of percent bias excluding warmup year."""
obs, sim = self.obs_sim_overlap()
return pd.Series({s: hydro.pbias(obs[s], sim[s]) for s in obs.columns})
@property
def pbias_abs(self):
"""pandas.Series of absolute percent bias excluding warmup year."""
return self.pbias.abs()
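    # Usage sketch for the goodness-of-fit properties above (per station, warmup excluded):
    #   run.station_daily_discharge.NSE     # pandas.Series of Nash-Sutcliffe efficiencies
    #   run.station_daily_discharge.pbias   # pandas.Series of percent biases
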
class subbasin_daily_waterbalance(ProjectOrRunData):
path = osp.join(RESDIR, 'subd.prn')
plugin = ['to_raster']
@staticmethod
def from_project(path, **readkwargs):
def parse_time(y, d):
dto = dt.date(int(y), 1, 1) + dt.timedelta(int(d) - 1)
return pd.Period(dto, freq='d')
d = pd.read_csv(path, delim_whitespace=True, date_parser=parse_time,
parse_dates=[[0, 1]], index_col=[0, 1], **readkwargs)
d.index.names = ['time', 'subbasinID']
return d
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=[0, 1], parse_dates=[0],
date_parser=pd.Period, **readkwargs)
return df
def to_raster(self, variable, timestep=None, prefix=None, name=None,
strds=True, mapset=None):
# extra argument
"""variable : str
Selected variable (will be appended to default prefix).
"""
prefix = prefix or self.__class__.__name__ + '_' + variable.lower()
_subbasin_or_hydrotope_values_to_raster(
self.project, self[variable].unstack(),
self.project.subbasins.reclass, timestep=timestep, name=name,
prefix=prefix, strds=strds, mapset=mapset)
return
to_raster.__doc__ = (_subbasin_or_hydrotope_values_to_raster.__doc__ +
to_raster.__doc__)
class subbasin_monthly_waterbalance(subbasin_daily_waterbalance):
path = osp.join(RESDIR, 'subm.prn')
def from_project(self, path, **readkwargs):
styr = self.project.config_parameters['iyr']
def parse_time(y, m):
return pd.Period('%04i-%02i' % (styr+int(y)-1, int(m)), freq='m')
with open(path) as f:
header = f.readline().split()
df = pd.read_csv(f, delim_whitespace=True, skiprows=1, header=None,
index_col=[0, 1], date_parser=parse_time,
parse_dates=[[0, 1]], names=header, **readkwargs)
df.index.names = ['time', 'subbasinID']
return df
class subbasin_daily_discharge(ProjectOrRunData):
path = osp.join(RESDIR, 'Q_gauges_all_sub_routed_m3s.csv')
@staticmethod
def from_project(path, **readkwargs):
df = pd.read_csv(path, delim_whitespace=True, index_col=[0, 1],
**readkwargs)
dtms = [dt.date(y, 1, 1) + dt.timedelta(d - 1) for y, d in df.index]
df.index = pd.PeriodIndex(dtms, freq='d', name='time')
df.columns = df.columns.astype(int)
return df
@staticmethod
def from_csv(path, **readkwargs):
df = pd.read_csv(path, index_col=0, parse_dates=[0], **readkwargs)
df.index = df.index.to_period(freq='d')
df.columns = df.columns.astype(int)
return df
class subbasin_daily_runoff(subbasin_daily_discharge):
path = osp.join(RESDIR, 'Q_gauges_all_sub_mm.csv')
class catchment_daily_waterbalance(ProjectOrRunData):
path = osp.join(RESDIR, 'bad.prn')
@staticmethod
def from_project(path, **readkwargs):
        df = pd.read_csv(path, delim_whitespace=True, **readkwargs)
        return df
import pandas as pd
import numpy as np
from datetime import datetime
from binance.spot import Spot
from portfolio_keys import *
from misc_functions.common_functions import generic_dca_profit_loss, database_recon_update
from crypto_utils import _paring_returner,_price_coverter
Binanceclient = Spot(key=BINANCE_API_KEY, secret=BINANCE_PRIVATE_KEY)
# help functions for final report
def kraken_get_balance_deprecated(object):
return pd.DataFrame(object.query_private('Balance')['result'],index=[0])
def kraken_get_open_orders(object):
orders = object.query_private('OpenOrders')
df_orders = pd.DataFrame()
for order in orders['result']['open'].keys():
df_orders = pd.concat([df_orders,pd.DataFrame({
'open_time':datetime.fromtimestamp(orders['result']['open'][order]['opentm']).strftime("%A, %d %B %Y, %H:%M:%S")
,'pair': orders['result']['open'][order]['descr']['pair']
,'ordertype': orders['result']['open'][order]['descr']['ordertype']
,'type': orders['result']['open'][order]['descr']['type']
,'price': orders['result']['open'][order]['descr']['price']
,'vol':orders['result']['open'][order]['vol']
,'order': orders['result']['open'][order]['descr']['order']
,'fee':orders['result']['open'][order]['fee']
,'transaction_id' : order
},index=[order])])
df_orders['exchange'] = 'kraken'
return df_orders
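

# Usage sketch (illustrative): the functions above expect a krakenex-style client exposing
# query_private(); the credential constant names below are assumptions, not actual imports.
#   import krakenex
#   kraken_client = krakenex.API(key=KRAKEN_API_KEY, secret=KRAKEN_PRIVATE_KEY)
#   open_orders = kraken_get_open_orders(kraken_client)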
def kraken_get_transactions_history_deprecated(object):
tradeshistory = object.query_private('TradesHistory')
    df_tradeshistory = pd.DataFrame()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import timeit
__author__ = ['<NAME>']
__email__ = ['<EMAIL>']
__package__ = 'Gemm testing'
NUM_REPEATS = 10
NUMBER = 500
def gemm_nn (N, M, K):
SETUP_CODE = '''
import numpy as np
np.random.seed(123)
N, M, K = ({N}, {M}, {K})
a = np.random.uniform(low=0., high=1., size=(N, M))
b = np.random.uniform(low=0., high=1., size=(M, K))
'''.format(**{'N' : N,
'M' : M,
'K' : K
})
TEST_CODE = '''
c = np.einsum('ij, jk -> ik', a, b, optimize=True)
'''
times = timeit.repeat(setup=SETUP_CODE,
stmt=TEST_CODE,
repeat=NUM_REPEATS,
number=NUMBER)
return times
def gemm_nt (N, M, K):
SETUP_CODE = '''
import numpy as np
np.random.seed(123)
N, M, K = ({N}, {M}, {K})
a = np.random.uniform(low=0., high=1., size=(N, M))
b = np.random.uniform(low=0., high=1., size=(M, K))
bt = b.T
'''.format(**{'N' : N,
'M' : M,
'K' : K
})
TEST_CODE = '''
c = np.einsum('ij, kj -> ik', a, bt, optimize=True)
'''
times = timeit.repeat(setup=SETUP_CODE,
stmt=TEST_CODE,
repeat=NUM_REPEATS,
number=NUMBER)
return times
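

# A small, hedged sanity check added for clarity (not part of the original benchmark): it
# verifies that the two einsum subscript patterns timed above are equivalent to a plain
# matrix product. Array shapes here are arbitrary examples.
def _check_einsum_equivalence():
  import numpy as np
  a = np.random.uniform(size=(2, 3))
  b = np.random.uniform(size=(3, 4))
  assert np.allclose(np.einsum('ij, jk -> ik', a, b), a @ b)
  assert np.allclose(np.einsum('ij, kj -> ik', a, b.T), a @ b)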
if __name__ == '__main__':
import seaborn as sns
import pylab as plt
import pandas as pd
import numpy as np
N, M, K = (100, 200, 300)
times_nn = gemm_nn(N, M, K)
times_nt = gemm_nt(N, M, K)
ref = np.asarray(times_nn)
val = np.asarray(times_nt)
times_nt = np.asarray(times_nt)/ref
times_nn = np.asarray(times_nn)/ref
  times_nn = pd.DataFrame(data=times_nn, columns=['Times'])
import pandas as pd
import xml.etree.ElementTree as ET
import lxml.etree as etree
most_serious_problem = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/most_serious_problem/special_eb_most_serious_problem_final.csv")
personally_taken_action = pd.read_csv(
"../data/processed_data/special_eb/data/3_final/personally_taken_action/special_eb_personally_taken_action_final.csv")
severity_of_problem = pd.read_csv(
    "../data/processed_data/special_eb/data/3_final/severity_of_problem/special_eb_severity_of_problem_final.csv")
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ Index(right)
tm.assert_series_equal(result, expected)
result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
# GH#9016: support bitwise op for integer types
# with non-matching indexes, logical operators will cast to object
# before operating
index = list("bca")
        s_tft = Series([True, False, True], index=index)
s_tff = Series([True, False, False], index=index)
s_0123 = Series(range(4), dtype="int64")
# s_0123 will be all false now because of reindexing like s_tft
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_tft & s_0123
tm.assert_series_equal(result, expected)
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_0123 & s_tft
tm.assert_series_equal(result, expected)
s_a0b1c0 = Series([1], list("b"))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list("abc"))
tm.assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list("abc"))
tm.assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
tm.assert_series_equal(result, expected)
def test_scalar_na_logical_ops_corners_aligns(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
d = DataFrame({"A": s})
expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
result = s & d
tm.assert_frame_equal(result, expected)
result = d & s
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], dtype=bool)
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
def test_reversed_xor_with_index_returns_index(self):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Index.symmetric_difference(idx1, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx1 ^ ser
tm.assert_index_equal(result, expected)
expected = Index.symmetric_difference(idx2, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx2 ^ ser
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"op",
[
pytest.param(
ops.rand_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __and__ returns Index intersection",
raises=AssertionError,
strict=True,
),
),
pytest.param(
ops.ror_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __or__ returns Index union",
raises=AssertionError,
strict=True,
),
),
],
)
def test_reversed_logical_op_with_index_returns_series(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series(op(idx1.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series(op(idx2.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op, expected",
[
(ops.rand_, Index([False, True])),
(ops.ror_, Index([False, True])),
(ops.rxor, Index([])),
],
)
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# behaving as set ops is deprecated, will become logical ops
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list("bca"))
b = Series([False, True, False], list("abc"))
expected = Series([False, True, False], list("abc"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False], list("abc"))
result = a | b
tm.assert_series_equal(result, expected)
expected = Series([True, False, False], list("abc"))
result = a ^ b
tm.assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list("bca"))
b = Series([False, True, False, True], list("abcd"))
expected = Series([False, True, False, False], list("abcd"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False, False], list("abcd"))
result = a | b
tm.assert_series_equal(result, expected)
# filling
# vs empty
empty = Series([], dtype=object)
result = a & empty.copy()
expected = Series([False, False, False], list("bca"))
tm.assert_series_equal(result, expected)
result = a | empty.copy()
expected = Series([True, False, True], list("bca"))
tm.assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ["z"])
expected = Series([False, False, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
result = a | Series([1], ["z"])
expected = Series([True, True, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [
empty.copy(),
Series([1], ["z"]),
Series(np.nan, b.index),
Series(np.nan, a.index),
]:
result = a[a | e]
tm.assert_series_equal(result, a[a])
for e in [Series(["z"])]:
result = a[a | e]
tm.assert_series_equal(result, a[a])
# vs scalars
index = list("bca")
t = Series([True, False, True])
for v in [True, 1, 2]:
            result = Series([True, False, True], index=index) & v
            expected = Series([True, False, True], index=index)
            tm.assert_series_equal(result, expected)
# -*- coding: utf-8 -*-
"""
Unit test for names_places.py
Created on Wed Oct 13 13:15:45 2021
@author: Beck
"""
import unittest
import pandas as pd
import csv
from scripts.feature_extraction.names_places import NamesPlacesFeature
class NamesPlacesFeatureTest(unittest.TestCase):
def setUp(self):
self.INPUT_COLUMN = "input"
self.names_places_feature = NamesPlacesFeature(self.INPUT_COLUMN)
self.df = pd.DataFrame()
def test_input_columns(self):
self.assertEqual(self.names_places_feature._input_columns, [self.INPUT_COLUMN])
def test_names_places(self):
        df = pd.read_csv("data/preprocessing/preprocessed.csv", quoting=csv.QUOTE_NONNUMERIC, lineterminator="\n")
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
import os
import urllib.request
import json
import numpy as np
import pandas as pd
from multiprocessing.dummy import Pool
def download_file(name, spec):
url = spec['url']
# All of the datasets are ascii files, so save with extension .txt
save_file = 'data/raw/{}.txt'.format(name)
# file exists
if os.path.isfile(save_file):
print('Ignore', save_file, ': file exists')
return
try:
urllib.request.urlretrieve(url, save_file)
except Exception as e:
print('Got', type(e).__name__, '-', e,
'\nIgnore', save_file, ': fail to download')
else:
print('Successfully downloaded', save_file)
def download_datasets(data_spec):
if not os.path.exists('data'):
os.makedirs('data')
if not os.path.exists('data/raw'):
os.makedirs('data/raw')
# download files
with Pool(16) as pool:
pool.starmap(download_file, data_spec.items())
def convert_to_npz(data_spec):
print()
print('-----------------------------------')
print('Dataset Conversion')
for name, spec in data_spec.items():
raw_file = 'data/raw/{}.txt'.format(name)
save_file = 'data/{}.npz'.format(name)
# read dataframe
layout = spec['layout']
try:
if layout == 'csv':
df = pd.read_csv(
raw_file,
header=None, engine='python',
**spec['kwargs'])
elif layout == 'txt':
df = pd.read_csv(
raw_file,
header=None, delim_whitespace=True, engine='python',
**spec['kwargs'])
elif layout == "special":
df = pd.read_csv(raw_file, engine='python', **spec['kwargs'])
except FileNotFoundError:
print('Ignore', raw_file, ': file not found on disk')
continue
# drop null rows
df = df[(df.values != spec['null_value']).all(axis=1)]
# preprocess
for col, method in spec['preprocess']:
if method == 'comma_string_to_float':
df[col] = df[col].replace({',':'.'}, regex=True).astype(float)
# split target y
y = pd.factorize(df[spec["target"]])[0]
# drop unused columns
X = df.drop(columns=[spec["target"]]+spec['drop_cols'])
# create dummy variables
if spec['dummy_cols']:
assert spec["target"] not in spec['dummy_cols']
dummies = pd.get_dummies(X[spec['dummy_cols']].astype(str), drop_first=True)
non_dummies = X.drop(columns=spec['dummy_cols'])
X = | pd.concat([non_dummies, dummies], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 28})
plt.rcParams['figure.figsize'] = (30, 15)
# This journal documents the manipulation of PV installation data for the USA. This covers selection of data, and weighting by marketshare.
# In[2]:
cwd = os.getcwd() #grabs current working directory
df_installs_raw = pd.read_csv(cwd+"/../../PV_ICE/baselines/SupportingMaterial/PVInstalls_USA_AllSources.csv", index_col='Year')
# In[3]:
sources = df_installs_raw.columns
#print(len(sources))
plt.plot(df_installs_raw.index,df_installs_raw[sources[0]],lw=4,marker='*',label=sources[0])
plt.plot(df_installs_raw.index,df_installs_raw[sources[1]],lw=3,marker='o',label=sources[1])
plt.plot(df_installs_raw.index,df_installs_raw[sources[2]],lw=2,marker='o',label=sources[2])
plt.plot(df_installs_raw.index,df_installs_raw[sources[3]],lw=2,marker='o',label=sources[3])
plt.plot(df_installs_raw.index,df_installs_raw[sources[4]],lw=2,marker='o',label=sources[4])
plt.plot(df_installs_raw.index,df_installs_raw[sources[5]],lw=2,marker='o',label=sources[5])
plt.yscale('log')
plt.ylabel('PV Installed (MW)')
plt.legend(bbox_to_anchor=(0, 1, 1, 0), loc="lower left")
#plt.plot(df_installs_raw, marker='o')
# # Select the data to use for installs
# The IRENA is consistently lower than the other sources from 2012 through the present. Given that all other sources are in agreement, we will select one of these data sets to use for installation data, rather than IRENA. In this case, we will select the Wood Mackenzie Power & Renewables quarterly reports and PV forecasts from 2010 through 2019.
# In[4]:
installs_2010_2019 = df_installs_raw.loc[(df_installs_raw.index>=2010) & (df_installs_raw.index<=2019)]
installs_recent = pd.DataFrame(installs_2010_2019[sources[0]])
installs_recent.columns = ['installed_pv_MW']
print(installs_recent)
# Only 1 dataset exists from 1995 to 2000, from IEA PVPS 2010 National Survey report. This seems to track reasonably well with Wood Mackenzie data between 2008 and 2010. Therefore, this will be used up to 2010.
# In[5]:
installs_upto2010 = df_installs_raw.loc[(df_installs_raw.index<2010)]
installs_old = | pd.DataFrame(installs_upto2010[sources[1]]) | pandas.DataFrame |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# first, REMEMBER to activate cryptoalgowheel-S2 environment!
# %%
import datetime
import os
import sys
import backtrader as bt
import backtrader.analyzers as btanalyzers
import numpy as np
import pandas as pd
import matplotlib
import PyQt5
import seaborn
import sklearn
# %%
#*****WARNING: REVISE THE "dir" FOLDER PATHS!!!
datadir = "./data"
logdir = "./log"
reportdir = "./report"
datafile = "BTC_USDT_1h.csv" #!NOTICE: use our data "BTC_USDT_1h.csv" here
from_datetime = "2020-01-01 00:00:00"
to_datetime = "2020-04-01 00:00:00"
# %%
class OptDoubleSMACross(bt.Strategy):
params = (
("pfast", 10),
("pslow", 20),
)
def log(self, txt, dt=None, doprint=False): #(by default don't print log here)
if doprint:
dt = dt or self.datas[0].datetime.date(0)
print("%s, %s" % (dt.isoformat(), txt))
def __init__(self):
self.dataclose = self.datas[0].close
# add both "fast" and "slow" SimpleMovingAverage indicators
self.fastsma = bt.indicators.SimpleMovingAverage(self.datas[0], period = self.params.pfast)
self.slowsma = bt.indicators.SimpleMovingAverage(self.datas[0], period = self.params.pslow)
# add a "CrossOver" signal!!
self.crossover = bt.indicators.CrossOver(self.fastsma, self.slowsma) #NOTICE here passing in "fast" SMA as 1st line, "slow" SMA as 2nd line
#["CrossOver" indicator Usage reference: https://www.backtrader.com/home/helloalgotrading/; documentation: https://www.backtrader.com/docu/indautoref/#crossover]
# *****! Warning: avoid the erroneous cases where "pslow" is not sufficiently larger than "pfast"!
# *** Documentation: https://www.backtrader.com/docu/exceptions/#strategyskiperror
if self.params.pslow < self.params.pfast+5:
raise bt.errors.StrategySkipError
def next(self):
if not self.position: #if not in the market yet (no "position" yet)
if self.crossover > 0: # "CrossOver" function return 1.0: meaning "fast SMA"(1st line) crosses the "slow SMA"(2nd line) upwards
#--BUY!
self.buy()
else: #("already in the market")
if self.crossover < 0: #"CrossOver" function return -1.0: meaning "fast SMA"(1st line) crosses the "slow SMA"(2nd line) downwards
#--SELL!
self.sell()
#*** added "Strategy hook" here - "stop" method, in order to record the portfolio final net value of each optimization round:
def stop(self):
self.log("Fast SMA Period %2d, Slow SMA Period %2d: Ending Value %.2f" %
(self.params.pfast, self.params.pslow, self.broker.getvalue()), doprint=True) #(do print the log message by the end of each optimization round here)
# %% [markdown]
# KPIs to be calculated: <br>
# - Return (ending and starting values)
# - MaxDrawDown
# - TotalTrades (number of trades, WinTrades + LossTrades)
# - WinTrades
# - LossTrades
# - WinRatio (WinTrades / TotalTrades)
# - AverageWin$ (TotalWins dollar value / WinTrades)
# - AverageLoss$ (TotalLosses dollar value / LossTrades)
# - AverageWinLossRatio (AverageWin\$ / AverageLoss\$)
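# %% [markdown]
# A minimal, hypothetical sketch of how these KPIs could be assembled once the backtest has run.
# The attribute names below follow backtrader's TradeAnalyzer analysis dict (total.closed, won.total,
# won.pnl.total, lost.total, lost.pnl.total); MaxDrawDown would come from the DrawDown analyzer instead.
# The helper name `summarize_trades` is an assumption and is not part of the original notebook.
# %%
def summarize_trades(trade_analysis, start_value, end_value):
    # trade_analysis is expected to be the result of TradeAnalyzer.get_analysis()
    total = trade_analysis.total.closed   # TotalTrades
    wins = trade_analysis.won.total       # WinTrades
    losses = trade_analysis.lost.total    # LossTrades
    avg_win = trade_analysis.won.pnl.total / wins if wins else float("nan")            # AverageWin$
    avg_loss = abs(trade_analysis.lost.pnl.total) / losses if losses else float("nan")  # AverageLoss$
    return {
        "Return": end_value / start_value - 1.0,
        "TotalTrades": total,
        "WinTrades": wins,
        "LossTrades": losses,
        "WinRatio": wins / total if total else float("nan"),
        "AverageWin$": avg_win,
        "AverageLoss$": avg_loss,
        "AverageWinLossRatio": avg_win / avg_loss if wins and losses else float("nan"),
    }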
# %%
if __name__ == "__main__":
cerebro = bt.Cerebro()
# feed data:
data = pd.read_csv(os.path.join(datadir, datafile), index_col="datetime", parse_dates=True)
data = data.loc[(data.index >= pd.to_datetime(from_datetime)) & (data.index <= | pd.to_datetime(to_datetime) | pandas.to_datetime |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas
from collections import defaultdict
import numpy
import ujson
from pathlib import Path
from typing import *
from experiments.arg_helper import get_sm_ids_by_name_range
from experiments.evaluation_metrics import smodel_eval, DataNodeMode
from experiments.previous_work.serene_2018.ssd import SSD
from semantic_modeling.data_io import get_semantic_models, get_ontology
from semantic_modeling.karma.semantic_model import SemanticModel
from semantic_modeling.utilities.ontology import Ontology
from semantic_modeling.utilities.serializable import deserializeJSON
from transformation.models.data_table import DataTable
from transformation.models.table_schema import Schema
def evaluate_serene_outputs(files: List[Path], ont: Ontology, gold_sm: Optional[SemanticModel]=None) -> Union[dict, None]:
try:
cor_ssd_file = [file for file in files if file.name.endswith(".cor_ssd.json")][0]
ssd_file = [file for file in files if file.name.endswith(".ssd.json")][0]
except Exception as e:
raise Exception("Invalid : %s" % files[0], e)
cor_ssd = SSD.from_file(cor_ssd_file, ont).clear_serene_footprint()
ssd = SSD.from_file(ssd_file, ont)
chuffed_ssds = []
for file in files:
if file.name.find(".chuffed") != -1:
objs = deserializeJSON(file)
chuffed_ssds.append([SSD.from_json(obj, ont) for obj in objs])
if gold_sm is None:
# SERENE can filter the cor_ssd graph to remove new-semantic types
gold_graph = cor_ssd.graph
else:
gold_graph = gold_sm.graph
eval_results = {}
for chuffed_idx, ssds in enumerate(chuffed_ssds):
eval_results[chuffed_idx] = {}
if len(ssds) == 0:
eval_results[chuffed_idx] = {
'precision': 0,
'recall': 0,
'f1': 0
}
else:
ssd = ssds[0]
# ssd.graph.render()
result = smodel_eval.f1_precision_recall(gold_graph, ssd.graph, DataNodeMode.NO_TOUCH)
eval_results[chuffed_idx]['precision'] = result['precision']
eval_results[chuffed_idx]['recall'] = result['recall']
eval_results[chuffed_idx]['f1'] = result['f1']
return eval_results
if __name__ == '__main__':
dataset = "museum_crm"
sms = get_semantic_models(dataset)
sms_index = {sm.id[:3]: sm for sm in sms}
ont = get_ontology(dataset)
ont.register_namespace("serene", "http://au.csiro.data61/serene/dev#")
# get serene output by sms
kfold_results = []
stype = "ReImplMinhISWC_False_pat"
for kfold in ["kfold-s01-s14", "kfold-s15-s28", "kfold-s08-s21"]:
kfold_sms_prefix = {sm[:3] for sm in get_sm_ids_by_name_range(*kfold.replace("kfold-", "").split("-"), [sm.id for sm in sms])}
print("==== KFOLD:", kfold, "====")
serene_output_dir = Path("/workspace/tmp/serene-python-client/datasets/%s/" % dataset) / kfold / f"predicted_{stype}"
serene_outputs = {}
if not serene_output_dir.exists():
print("not existed")
continue
for file in sorted(serene_output_dir.iterdir()):
if not file.name.startswith("s") or file.name.startswith("ser"):
continue
prefix = file.name[:3]
if prefix not in serene_outputs:
serene_outputs[prefix] = []
serene_outputs[prefix].append(file)
methods = defaultdict(lambda: {})
for prefix, files in sorted(serene_outputs.items(), key=lambda x: x[0]):
if prefix in kfold_sms_prefix:
continue
# if not prefix == "s09":
# continue
# sms_index[prefix].graph.render(80)
eval_res = evaluate_serene_outputs(files, ont, sms_index[prefix])
# print(prefix, ujson.dumps(eval_res, indent=4))
for i, res in eval_res.items():
methods[i][prefix] = {
'precision': res['precision'],
'recall': res['recall'],
'f1': res['f1'],
}
assert len(methods[0]) == 14
header = ["source"]
matrix = []
for method_idx, results in sorted(methods.items(), key=lambda x: x[0]):
header.append(f"{method_idx}_precision")
header.append(f"{method_idx}_recall")
header.append(f"{method_idx}_f1")
for prefix, o in sorted(methods[0].items(), key=lambda x: x[0]):
matrix.append([prefix])
for method_idx, results in sorted(methods.items(), key=lambda x: x[0]):
for i, (prefix, o) in enumerate(sorted(results.items(), key=lambda x: x[0])):
matrix[i].append(o['precision'])
matrix[i].append(o['recall'])
matrix[i].append(o['f1'])
print(matrix)
# print(DataTable.load_from_rows("", matrix).to_string())
df = pandas.DataFrame(data=matrix, columns=header)
matrix.append(['average'] + list(df.mean(axis=0)))
df = | pandas.DataFrame(data=matrix, columns=header) | pandas.DataFrame |
# Copyright 2016 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import os
from pandas.util.testing import assert_frame_equal
import numpy as np
import pandas as pd
import pytest
from hs2client.compat import Decimal
from hs2client.tests.common import ExampleEnv, random_table_name, insert_tuples
import hs2client as hs2
TEST_HOST = os.environ.get('HS2_TEST_IMPALA_HOST', 'localhost')
TEST_PORT = int(os.environ.get('HS2_TEST_IMPALA_PORT', 21050))
TEST_USER = os.environ.get('HS2_TEST_IMPALA_USER', getpass.getuser())
def test_service_session_scope():
svc = hs2.connect(TEST_HOST, TEST_PORT, TEST_USER)
session = svc.open_session()
svc = None
op = session.execute('show tables') # noqa
session = None
@pytest.fixture(scope='module')
def env1(request):
env = ExampleEnv(TEST_HOST, TEST_PORT, TEST_USER)
env.setup()
request.addfinalizer(env.teardown)
return env
def test_result_metadata(env1):
# As of Impala 2.5, only supports scalar types in result set
# metadata. hive can return more types; will need to test this
# separately
table_name = random_table_name()
sql = """
create table {0} (
f0 tinyint comment 'f0',
f1 smallint,
f2 int,
f3 bigint,
f4 float,
f5 double,
f6 char(10),
f7 varchar(20),
f8 string,
f9 decimal(12, 2),
f10 timestamp,
f11 boolean
)
""".format(table_name)
env1.session.execute_sync(sql)
op = env1.select_all(table_name)
schema = op.schema
expected = [
# name, type, comment
('f0', 'TINYINT', 'f0'),
('f1', 'SMALLINT', None),
('f2', 'INT', None),
('f3', 'BIGINT', None),
('f4', 'FLOAT', None),
('f5', 'DOUBLE', None),
('f6', 'CHAR', None),
('f7', 'VARCHAR', None),
('f8', 'STRING', None),
('f9', 'DECIMAL', None),
('f10', 'TIMESTAMP', None),
('f11', 'BOOLEAN', None)
]
assert len(schema) == len(expected)
assert schema.ncolumns == len(schema)
for i, attrs in enumerate(expected):
name, typename, comment = attrs
desc = schema[i]
coltype = desc.type
assert desc.name == name
assert coltype.name == typename
# Comments seem to not be coming back
# assert desc.comment == comment
# test decimal stuff
decimal = schema[9].type
assert decimal.precision == 12
assert decimal.scale == 2
# test char/varchar stuff
char = schema[6].type
assert char.max_length == 10
varchar = schema[7].type
assert varchar.max_length == 20
# ---------------------------------------------------------------------
# Well-behaved pandas types: float, double, string, timestamp
def test_pandas_fetch_numeric(env1):
all_names = []
all_types = []
all_data = []
ex_dtypes = []
def push(names, types, ex_dtypes, data):
all_names.extend(names)
all_types.extend(types)
all_data.extend(data)
ex_dtypes.extend(dtypes)
K = 20
length = 100
# Integer without nulls
names = ['i0', 'i1', 'i2', 'i3']
types = ['tinyint', 'smallint', 'int', 'bigint']
dtypes = ['i1', 'i2', 'i4', 'i8']
data = [[1, 2, 3, 4, 5] * K] * 4
push(names, types, dtypes, data)
expected = pd.DataFrame(index=range(length))
for name, np_type, data in zip(names, dtypes, data):
expected[name] = np.array(data, dtype=np_type)
# Integers with nulls
names = ['i4', 'i5', 'i6', 'i7']
types = ['tinyint', 'smallint', 'int', 'bigint']
dtypes = ['f8'] * 4
data = [[1, 2, None, 4, 5] * K] * 4
for name, np_type, arr in zip(names, dtypes, data):
# leave this conversion to pandas (automatically -> float64)
expected[name] = arr
push(names, types, dtypes, data)
# Floating point cases
names = ['float0', 'float1', 'double0', 'double1']
types = ['float', 'float', 'double', 'double']
dtypes = ['f4', 'f4', 'f8', 'f8']
data = [
[-0.5, 0, None, 0.5, 1] * K,
[-0.5, 0, 0.5, 1, 1.5] * K,
] * 2
for name, ctype, dtype, arr in zip(names, types, dtypes, data):
expected[name] = np.array(arr, dtype=dtype)
push(names, types, dtypes, data)
# We can do this in one shot
result = _roundtrip_data(env1, all_names, all_types, all_data)
assert_frame_equal(result, expected)
for name, ex_dtype in zip(all_names, ex_dtypes):
assert result[name].dtype == ex_dtype
def test_pandas_fetch_boolean(env1):
# Test with nulls and without
K = 20
data = [
[True, False, True, False, True] * K,
[True, None, None, False, True] * K
]
colnames = ['f0', 'f1']
coltypes = ['boolean', 'boolean']
# We can do this in one shot
expected = pd.DataFrame({name: np.array(arr)
for name, arr in zip(colnames, data)},
columns=colnames)
result = _roundtrip_data(env1, colnames, coltypes, data)
assert_frame_equal(result, expected)
# Boolean + nulls in pandas becomes numpy object
assert result['f0'].dtype == np.bool_
assert result['f1'].dtype == np.object_
def test_pandas_fetch_string(env1):
K = 20
data = [
['long string', 'foo', None, 'foo', 'long string'] * K
]
colnames = ['f0']
coltypes = ['string', 'boolean']
# We can do this in one shot
expected = pd.DataFrame({colnames[0]: data[0]})
result = _roundtrip_data(env1, colnames, coltypes, data)
assert_frame_equal(result, expected)
# Test string interning
c = result['f0']
assert c[0] is c[4]
assert c[1] is c[3]
def test_pandas_fetch_timestamp(env1):
K = 20
v = '2000-01-01 12:34:56.123456'
data = [
[v, None, '2001-01-01', v] * K
]
colnames = ['f0']
coltypes = ['timestamp']
# We can do this in one shot
expected = pd.DataFrame({colnames[0]: pd.to_datetime(data[0])})
result = _roundtrip_data(env1, colnames, coltypes, data)
assert_frame_equal(result, expected)
def test_pandas_fetch_decimal(env1):
K = 20
values = [-1.5, None, 0, 1.5] * K
colnames = ['f0']
coltypes = ['decimal(12,2)']
# We can do this in one shot
expected = pd.DataFrame({
colnames[0]: np.array([Decimal(x) if x is not None else x
for x in values], dtype=object)
})
result = _roundtrip_data(env1, colnames, coltypes, [values])
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ Index(right)
tm.assert_series_equal(result, expected)
result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
# GH#9016: support bitwise op for integer types
# with non-matching indexes, logical operators will cast to object
# before operating
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_tft = Series([True, False, True], index=index)
s_tff = Series([True, False, False], index=index)
s_0123 = Series(range(4), dtype="int64")
# s_0123 will be all false now because of reindexing like s_tft
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_tft & s_0123
tm.assert_series_equal(result, expected)
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_0123 & s_tft
tm.assert_series_equal(result, expected)
s_a0b1c0 = Series([1], list("b"))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list("abc"))
tm.assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list("abc"))
tm.assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
tm.assert_series_equal(result, expected)
def test_scalar_na_logical_ops_corners_aligns(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
d = DataFrame({"A": s})
expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
result = s & d
tm.assert_frame_equal(result, expected)
result = d & s
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], dtype=bool)
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
def test_reversed_xor_with_index_returns_index(self):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Index.symmetric_difference(idx1, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx1 ^ ser
tm.assert_index_equal(result, expected)
expected = Index.symmetric_difference(idx2, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx2 ^ ser
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"op",
[
pytest.param(
ops.rand_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __and__ returns Index intersection",
raises=AssertionError,
strict=True,
),
),
pytest.param(
ops.ror_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __or__ returns Index union",
raises=AssertionError,
strict=True,
),
),
],
)
def test_reversed_logical_op_with_index_returns_series(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series(op(idx1.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series(op(idx2.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op, expected",
[
(ops.rand_, Index([False, True])),
(ops.ror_, Index([False, True])),
(ops.rxor, Index([])),
],
)
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# behaving as set ops is deprecated, will become logical ops
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list("bca"))
b = Series([False, True, False], list("abc"))
expected = Series([False, True, False], list("abc"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False], list("abc"))
result = a | b
tm.assert_series_equal(result, expected)
expected = Series([True, False, False], list("abc"))
result = a ^ b
tm.assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list("bca"))
b = Series([False, True, False, True], list("abcd"))
expected = Series([False, True, False, False], list("abcd"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False, False], list("abcd"))
result = a | b
tm.assert_series_equal(result, expected)
# filling
# vs empty
empty = Series([], dtype=object)
result = a & empty.copy()
expected = Series([False, False, False], list("bca"))
tm.assert_series_equal(result, expected)
result = a | empty.copy()
expected = Series([True, False, True], list("bca"))
tm.assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ["z"])
expected = Series([False, False, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
result = a | Series([1], ["z"])
expected = Series([True, True, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [
empty.copy(),
Series([1], ["z"]),
Series(np.nan, b.index),
Series(np.nan, a.index),
]:
result = a[a | e]
tm.assert_series_equal(result, a[a])
for e in [Series(["z"])]:
result = a[a | e]
tm.assert_series_equal(result, a[a])
# vs scalars
index = list("bca")
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
tm.assert_series_equal(result, expected)
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
for v in [np.nan, "foo"]:
with pytest.raises(TypeError, match=msg):
t | v
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
tm.assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
tm.assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
tm.assert_series_equal(result, expected)
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
for v in [np.nan]:
with pytest.raises(TypeError, match=msg):
t & v
def test_logical_ops_df_compat(self):
# GH#1134
s1 = Series([True, False, True], index=list("ABC"), name="x")
s2 = Series([True, True, False], index=list("ABD"), name="x")
exp = Series([True, False, False, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp_or1 = Series([True, True, True, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s1 | s2, exp_or1)
# np.nan | True => np.nan, filled with False
exp_or = Series([True, True, False, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s2 | s1, exp_or)
# DataFrame doesn't fill nan with False
tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp.to_frame())
tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp.to_frame())
exp = DataFrame({"x": [True, True, np.nan, np.nan]}, index=list("ABCD"))
tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp_or1.to_frame())
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp_or.to_frame())
# different length
s3 = Series([True, False, True], index=list("ABC"), name="x")
s4 = Series([True, True, True, True], index=list("ABCD"), name="x")
exp = Series([True, False, True, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp_or1 = Series([True, True, True, False], index=list("ABCD"), name="x")
tm.assert_series_equal(s3 | s4, exp_or1)
# True | np.nan => True
exp_or = Series([True, True, True, True], index=list("ABCD"), name="x")
| tm.assert_series_equal(s4 | s3, exp_or) | pandas._testing.assert_series_equal |
import typing as t
import numpy as np
import pandas as pd
import pytest
from statsmodels.tsa.holtwinters import ExponentialSmoothing, HoltWintersResults
from bentoml.statsmodels import StatsModel
from tests._internal.helpers import assert_have_file_extension
test_df = pd.DataFrame([[0, 0, 1, 1]])
def predict_df(model: t.Any, df: pd.DataFrame):
return model.predict(int(df.iat[0, 0]))
# exported from
# https://colab.research.google.com/github/bentoml/gallery/blob/master/statsmodels_holt/bentoml_statsmodels.ipynb
@pytest.fixture(scope="session")
def holt_model() -> "HoltWintersResults":
df: pd.DataFrame = pd.read_csv(
"https://raw.githubusercontent.com/jbrownlee/Datasets/master/shampoo.csv"
)
# Taking a test-train split of 80 %
train = df[0 : int(len(df) * 0.8)]
test = df[int(len(df) * 0.8) :]
# Pre-processing the Month field
train.Timestamp = pd.to_datetime(train.Month, format="%m-%d")
train.index = train.Timestamp
test.Timestamp = | pd.to_datetime(test.Month, format="%m-%d") | pandas.to_datetime |
# -*- coding: utf-8 -*-
import nose
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
from pandas.core import config as cf
from pandas.compat import u
from pandas.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
na_value_for_dtype)
_multiprocess_can_split_ = True
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
for p in [tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel())
]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert (np.array_equal(result, exp))
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
assert (np.array_equal(result, exp))
# list of strings / unicode
result = isnull(['foo', 'bar'])
assert (not result.any())
result = isnull([u('foo'), u('bar')])
assert (not result.any())
def test_isnull_nat():
result = isnull([NaT])
exp = np.array([True])
assert (np.array_equal(result, exp))
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
assert (np.array_equal(result, exp))
def test_isnull_numpy_nat():
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime():
assert (not isnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert (notnull(idx).all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert (mask[0])
assert (not mask[1:].any())
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert (mask[0])
assert (not mask[1:].any())
mask = isnull(pidx[1:])
assert (not mask.any())
class TestIsNull(tm.TestCase):
def test_0d_array(self):
self.assertTrue(isnull(np.array(np.nan)))
self.assertFalse(isnull(np.array(0.0)))
self.assertFalse(isnull(np.array(0)))
# test object dtype
self.assertTrue(isnull(np.array(np.nan, dtype=object)))
self.assertFalse(isnull(np.array(0.0, dtype=object)))
self.assertFalse(isnull(np.array(0, dtype=object)))
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]),
np.array([np.nan, np.nan]))
assert array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]))
assert array_equivalent(np.array([np.nan, None], dtype='object'),
np.array([np.nan, None], dtype='object'))
assert array_equivalent(np.array([np.nan, 1 + 1j], dtype='complex'),
np.array([np.nan, 1 + 1j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype='complex'), np.array(
[np.nan, 1 + 2j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]))
assert not array_equivalent(
np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
assert array_equivalent(Float64Index([0, np.nan]),
| Float64Index([0, np.nan]) | pandas.Float64Index |
# coding=utf-8
# /usr/bin/env python
'''
Author: wenqiangw
Email: <EMAIL>
Date: 2020-06-29 11:14
Desc:
监控评分卡开发样本和现有样本分数分布的差异程度,关注样本人群的稳定性。
1. Population Stability Index (PSI):群体稳定性指标,用来衡量分数分布的变化
2. Characteristic Stability Index (CSI):特征稳定性指标,用来衡量特征层面的变化
'''
import pickle
import pandas as pd
import numpy as np
from pynovice.score_card.src.data_binning import DataBinning
class StabilityIndex:
def __init__(self):
'''
To compute the stability (PSI) of the model score:
load bins & bins_statistic_dict, which can be computed with calc_expect_bins_statistic().
To compute the stability (CSI) of the input features:
load bins & bins_statistic_dict together with bins_score_dict, computed with calc_expect_bins_statistic() and calc_expect_bins_score().
Alternatively, load data previously saved to disk via load_bins_statistic()/load_bins_score().
'''
self.bins_statistic_dict = {} # sample count in each bin
self.bins_score_dict = {} # score contribution of each bin
self.bins=[]
def calc_expect_bins_statistic(self,expect_array,bins=10.0,bins_func='frequence_blocks'):
'''
Bin the expected (development-sample) array and count samples per bin.
:param expect_array:
:param bins: either a list of bin edges or the number of bins
:param bins_func: binning method used when bins is an integer
:return:
'''
df = pd.Series(expect_array).dropna()
if isinstance(bins,list):
self.bins=bins
df_bins = pd.cut(df, bins=bins)
else:
binning = DataBinning(box_num=bins, _func=bins_func)
df_bins = binning.fit_transform(df_x=df)
self.bins = binning.bins
self.bins_statistic_dict = df_bins.value_counts().to_dict()
def calc_expect_bins_score(self,bins_score_dict):
'''
:param bins_score_dict: model score associated with each bin
:return:
'''
self.bins_score_dict = bins_score_dict
def load_bins_statistic(self,file_name):
with open(file_name, "rb") as f:
self.bins, self.bins_statistic_dict = pickle.load(f)
def load_bins_score(self,file_name):
with open(file_name, "rb") as f:
self.bins_score_dict = pickle.load(f)
def dump_bins_statistic(self,file_name):
# json does not support the Interval type
bins_info = [self.bins, self.bins_statistic_dict]
with open(file_name, "wb") as f:
pickle.dump(bins_info, f)
def get_psi(self,actual_array,eps=1e-4):
'''
psi = sum((actual_pct - expected_pct) * ln(actual_pct / expected_pct)) over all bins
:param actual_array:
:param eps:
:return:
'''
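# Worked illustration with assumed numbers (not from the original code): if the expected
# bin shares are [0.25, 0.25, 0.50] and the actual shares are [0.20, 0.30, 0.50], then
# psi = (0.20-0.25)*ln(0.20/0.25) + (0.30-0.25)*ln(0.30/0.25) + 0 ≈ 0.011 + 0.009 ≈ 0.020,
# well below the commonly cited 0.1 threshold for a stable population.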
df_expect = | pd.DataFrame.from_dict(self.bins_statistic_dict,orient='index',columns=['expected_samples']) | pandas.DataFrame.from_dict |
import praw
import pandas as pd
import pickle
# Define user agent details
r = praw.Reddit(user_agent=user_agent, client_id=reddit_client_id, client_secret=reddit_client_secret)
depressed_posts_titles = []
depressed_posts_content = []
depressed_comments = []
depressed_dict = {"title": [],
"id": [],
"num_comments": [],
"comments": [],
"content": []}
non_depressed_dict = {"title": [],
"id": [],
"num_comments": [],
"comments": [],
"content": []}
def depressed_data(subreddit):
for post in r.subreddit(subreddit).top(limit=1000):
post.comments.replace_more(limit=100)
depressed_dict['title'].append(post.title)
depressed_dict['id'].append(post.id)
depressed_dict['num_comments'].append(post.num_comments)
comments = post.comments.list()
comments_new = []
for comment in comments:
comments_new.append(comment.body)
depressed_dict['comments'].append(comments_new)
depressed_dict['content'].append(post.selftext)
depressed_df = pd.DataFrame(depressed_dict)
depressed_df.drop_duplicates(subset=['id'], inplace=True)
print("Total number of comments: ", sum(depressed_df['num_comments']))
print(depressed_df)
def non_depressed_data(subreddit):
for post in r.subreddit(subreddit).hot(limit=1000):
post.comments.replace_more(limit=100)
non_depressed_dict['title'].append(post.title)
non_depressed_dict['id'].append(post.id)
non_depressed_dict['num_comments'].append(post.num_comments)
comments = post.comments.list()
comments_new = []
for comment in comments:
comments_new.append(comment.body)
non_depressed_dict['comments'].append(comments_new)
non_depressed_dict['content'].append(post.selftext)
non_depressed_df = pd.DataFrame(non_depressed_dict)
non_depressed_df.drop_duplicates(subset=['id'], inplace=True)
print("Total number of comments: ", sum(non_depressed_df['num_comments']))
print(non_depressed_df)
depressed_data('depression')
non_depressed_data('askreddit')
depressed_pickle = open("depressed.pickle","wb")
pickle.dump(depressed_dict, depressed_pickle)
depressed_pickle.close()
test_depressed_pickle = open("depressed.pickle","rb")
test_depressed_pickle_dict = pickle.load(test_depressed_pickle)
depressed_df2 = | pd.DataFrame(test_depressed_pickle_dict) | pandas.DataFrame |
from __future__ import annotations
import pytest
from pandas.errors import ParserWarning
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
to_datetime,
)
import pandas._testing as tm
from pandas.io.xml import read_xml
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
return request.param
@pytest.fixture(
params=[None, {"book": ["category", "title", "author", "year", "price"]}]
)
def iterparse(request):
return request.param
def read_xml_iterparse(data, **kwargs):
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write(data)
return read_xml(path, **kwargs)
xml_types = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
</row>
</data>"""
xml_dates = """<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
<date>2020-01-01</date>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
<date>2021-01-01</date>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
<date>2022-01-01</date>
</row>
</data>"""
# DTYPE
def test_dtype_single_str(parser):
df_result = read_xml(xml_types, dtype={"degrees": "str"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"degrees": "str"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": [4.0, float("nan"), 3.0],
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_all_str(parser):
df_result = read_xml(xml_dates, dtype="string", parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
dtype="string",
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": ["4.0", None, "3.0"],
"date": ["2020-01-01", "2021-01-01", "2022-01-01"],
},
dtype="string",
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_with_names(parser):
df_result = read_xml(
xml_dates,
names=["Col1", "Col2", "Col3", "Col4"],
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
parser=parser,
)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
names=["Col1", "Col2", "Col3", "Col4"],
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"Col1": ["square", "circle", "triangle"],
"Col2": Series(["00360", "00360", "00180"]).astype("string"),
"Col3": Series([4.0, float("nan"), 3.0]).astype("Int64"),
"Col4": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]),
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtype_nullable_int(parser):
df_result = read_xml(xml_types, dtype={"sides": "Int64"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"sides": "Int64"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": Series([4.0, float("nan"), 3.0]).astype("Int64"),
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtype_float(parser):
df_result = read_xml(xml_types, dtype={"degrees": "float"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"degrees": "float"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": Series([360, 360, 180]).astype("float"),
"sides": [4.0, float("nan"), 3.0],
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_wrong_dtype(datapath, parser, iterparse):
filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(
ValueError, match=('Unable to parse string "Everyday Italian" at position 0')
):
read_xml(filename, dtype={"title": "Int64"}, parser=parser, iterparse=iterparse)
def test_both_dtype_converters(parser):
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": [4.0, float("nan"), 3.0],
}
)
with tm.assert_produces_warning(ParserWarning, match="Both a converter and dtype"):
df_result = read_xml(
xml_types,
dtype={"degrees": "str"},
converters={"degrees": str},
parser=parser,
)
df_iter = read_xml_iterparse(
xml_types,
dtype={"degrees": "str"},
converters={"degrees": str},
parser=parser,
iterparse={"row": ["shape", "degrees", "sides"]},
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
# CONVERTERS
def test_converters_str(parser):
df_result = read_xml(xml_types, converters={"degrees": str}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
converters={"degrees": str},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": [4.0, float("nan"), 3.0],
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_converters_date(parser):
convert_to_datetime = lambda x: to_datetime(x)
df_result = read_xml(
xml_dates, converters={"date": convert_to_datetime}, parser=parser
)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
converters={"date": convert_to_datetime},
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": [4.0, float("nan"), 3.0],
"date": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]),
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_wrong_converters_type(datapath, parser, iterparse):
filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(TypeError, match=("Type converters must be a dict or subclass")):
read_xml(filename, converters={"year", str}, parser=parser, iterparse=iterparse)
def test_callable_func_converters(datapath, parser, iterparse):
filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(TypeError, match=("'float' object is not callable")):
read_xml(
filename, converters={"year": float()}, parser=parser, iterparse=iterparse
)
def test_callable_str_converters(datapath, parser, iterparse):
filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(TypeError, match=("'str' object is not callable")):
read_xml(
filename, converters={"year": "float"}, parser=parser, iterparse=iterparse
)
# PARSE DATES
def test_parse_dates_column_name(parser):
df_result = read_xml(xml_dates, parse_dates=["date"], parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
parse_dates=["date"],
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": [4.0, float("nan"), 3.0],
"date": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]),
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_parse_dates_column_index(parser):
df_result = read_xml(xml_dates, parse_dates=[3], parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
parse_dates=[3],
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": [4.0, float("nan"), 3.0],
"date": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]),
}
)
| tm.assert_frame_equal(df_result, df_expected) | pandas._testing.assert_frame_equal |
import abc
from datetime import datetime
from pathlib import Path
from typing import Type
import numpy as np
import pandas as pd
from dscience.tools.filesys_tools import FilesysTools
from dscience.core import PathLike
class AbstractSaveLoad(metaclass=abc.ABCMeta):
def save(self, path: PathLike) -> None:
raise NotImplementedError()
# noinspection PyAttributeOutsideInit
def load(self, path: PathLike):
raise NotImplementedError()
class SaveableTrainable(AbstractSaveLoad):
"""
A simpler saveable.
Saves and loads a .info file with these properties.
To implement, just override save() and load(), and have each call its supermethod
"""
def __init__(self):
self.info = {}
def save(self, path: PathLike) -> None:
FilesysTools.save_json(self.info, path.with_suffix(path.suffix + ".info"))
def load(self, path: PathLike):
path = Path(path)
self.info = FilesysTools.load_json(path.with_suffix(path.suffix + ".info"))
def fix(key, value):
if key in ["started", "finished"]:
return datetime.isoformat(value)
elif isinstance(value, list):
return np.array(value)
else:
return value
self.info = {k: fix(k, v) for k, v in self.info.items()}
return self
class SaveableTrainableCsv(SaveableTrainable, metaclass=abc.ABCMeta):
def save(self, path: PathLike):
path = Path(path)
super().save(path)
self.data.to_csv(path)
# noinspection PyAttributeOutsideInit
def load(self, path: PathLike):
path = Path(path)
super().load(path)
self.data = pd.read_csv(path)
return self
class SaveLoadCsv(AbstractSaveLoad, metaclass=abc.ABCMeta):
"""
Has an attribute (property) called `data`.
"""
@property
@abc.abstractmethod
def data(self) -> pd.DataFrame:
raise NotImplementedError()
@data.setter
def data(self, df: pd.DataFrame):
raise NotImplementedError()
@property
def df_class(self) -> Type[pd.DataFrame]:
return pd.DataFrame
def save(self, path: PathLike):
if not isinstance(self.data, self.df_class):
raise TypeError("Type {} is not a {}".format(type(self.data), self.df_class))
path = Path(path)
pd.DataFrame(self.data).to_csv(path)
def load(self, path: PathLike):
path = Path(path)
self.data = self.df_class( | pd.read_csv(path) | pandas.read_csv |
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
from supervised.preprocessing.label_encoder import LabelEncoder
class LabelEncoderTest(unittest.TestCase):
def test_fit(self):
# training data
d = {"col1": ["a", "a", "c"], "col2": ["w", "e", "d"]}
df = pd.DataFrame(data=d)
le = LabelEncoder()
# check first column
le.fit(df["col1"])
data_json = le.to_json()
# values from column should be in data json
self.assertTrue("a" in data_json)
self.assertTrue("c" in data_json)
self.assertTrue("b" not in data_json)
# there is alphabetical order for values
self.assertEqual(0, data_json["a"])
self.assertEqual(1, data_json["c"])
# check next column
le.fit(df["col2"])
data_json = le.to_json()
self.assertEqual(0, data_json["d"])
self.assertEqual(1, data_json["e"])
self.assertEqual(2, data_json["w"])
def test_transform(self):
# training data
d = {"col1": ["a", "a", "c"]}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
"""Core utilities"""
import sys
import logging
import inspect
from functools import singledispatch
from copy import deepcopy
from typing import (
Any,
Callable,
Iterable,
List,
Mapping,
Sequence,
Union,
Tuple,
)
import numpy
from numpy import array as Array
import pandas
from pandas import Categorical, DataFrame, Series
from pipda import register_func
from pipda.symbolic import Reference
from pipda.utils import CallingEnvs
from .exceptions import (
ColumnNotExistingError,
DataUnrecyclable,
NameNonUniqueError,
)
from .contexts import Context
from .types import (
StringOrIter,
Dtype,
is_iterable,
is_scalar,
is_categorical,
is_null,
)
from .defaults import DEFAULT_COLUMN_PREFIX, NA_REPR
# logger
logger = logging.getLogger("datar")
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler(sys.stderr)
stream_handler.setFormatter(
logging.Formatter(
"[%(asctime)s][%(name)s][%(levelname)7s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
logger.addHandler(stream_handler)
def vars_select(
all_columns: Iterable[str],
*columns: Any,
raise_nonexists: bool = True,
base0: bool = None,
) -> List[int]:
# TODO: support selecting data-frame columns
"""Select columns
Args:
all_columns: The column pool to select
*columns: arguments to select from the pool
raise_nonexist: Whether raise exception when column not exists
in the pool
base0: Whether indexes are 0-based if columns are selected by indexes.
If not given, will use `datar.base.get_option('index.base.0')`
Returns:
The selected indexes for columns
Raises:
ColumnNotExistingError: When the column does not exist in the pool
and raise_nonexists is True.
"""
from .collections import Collection
from ..base import unique
columns = [
column.name if isinstance(column, Series) else column
for column in columns
]
selected = Collection(*columns, pool=list(all_columns), base0=base0)
if raise_nonexists and selected.unmatched and selected.unmatched != {None}:
raise ColumnNotExistingError(
f"Columns `{selected.unmatched}` do not exist."
)
return unique(selected).astype(int)
def recycle_value(
value: Any, size: int, name: str = None
) -> Union[DataFrame, numpy.ndarray]:
"""Recycle a value based on a dataframe
Args:
value: The value to be recycled
size: The size to recycle to
name: The name to show in the error if failed to recycle
Returns:
The recycled value
"""
# TODO: follow base R's recycling rule? i.e. size 2 -> 4
from ..base import NA
if is_scalar(value):
value = [value]
length = len(value)
if length not in (0, 1, size):
name = "value" if not name else f"`{name}`"
expect = "1" if size == 1 else f"(1, {size})"
raise DataUnrecyclable(
f"Cannot recycle {name} to size {size}, "
f"expect {expect}, got {length}."
)
if isinstance(value, DataFrame):
if length == size == 0:
return DataFrame(columns=value.columns)
if length == 0:
value = DataFrame([[NA] * value.shape[1]], columns=value.columns)
if length == 1 and size > length:
return value.iloc[[0] * size, :].reset_index(drop=True)
return value
cats = categorized(value).categories if is_categorical(value) else None
if length == size == 0:
return [] if cats is None else Categorical([], categories=cats)
if length == 0:
value = [NA]
if isinstance(value, Series):
# try to keep Series class
# some operators can only do with it or with it correctly
# For example:
# Series([True, True]) & Series([False, NA]) -> [False, False]
# But with numpy.array, it raises error, since NA is a float
if length == 1 and size > length:
value = value.iloc[[0] * size].reset_index(drop=True)
return value
if isinstance(value, tuple):
value = list(value)
# dtype = getattr(value, 'dtype', None)
if length == 1 and size > length:
value = list(value) * size
if cats is not None:
return Categorical(value, categories=cats)
is_elem_iter = any(is_iterable(val) for val in value)
if is_elem_iter:
# without dtype: VisibleDeprecationWarning
# return Array(value, dtype=object)
# The above does not keep [DataFrame()] structure
return value
# Avoid numpy.nan to be converted into 'nan' when other elements are string
out = Array(value)
if numpy.issubdtype(out.dtype, numpy.str_) and is_null(value).any():
return Array(value, dtype=object)
return out
def recycle_df(
df: DataFrame,
value: Any,
df_name: str = None,
value_name: str = None,
) -> Tuple[DataFrame, Any]:
"""Recycle the dataframe based on value"""
if length_of(df) == 1:
df = recycle_value(df, length_of(value), df_name)
value = recycle_value(value, length_of(df), value_name)
return df, value
def categorized(data: Any) -> Any:
"""Get the Categorical object"""
if not is_categorical(data):
return data
if isinstance(data, Series):
return data.values
return data
@singledispatch
def to_df(data: Any, name: str = None) -> DataFrame:
"""Convert an object to a data frame"""
if is_scalar(data):
data = [data]
if name is None:
return DataFrame(data)
return DataFrame({name: data})
@to_df.register(numpy.ndarray)
def _(data: numpy.ndarray, name: StringOrIter = None) -> DataFrame:
if name is not None and is_scalar(name):
name = [name]
if len(data.shape) == 1:
return (
DataFrame(data, columns=name)
if name is not None
else DataFrame(data)
)
ncols = data.shape[1]
if name is not None and len(name) == ncols:
return DataFrame(data, columns=name)
# ignore the name
return DataFrame(data)
@to_df.register(DataFrame)
def _(data: DataFrame, name: str = None) -> DataFrame:
if name is None:
return data
return DataFrame({f"{name}${col}": data[col] for col in data.columns})
@to_df.register(Series)
def _(data: Series, name: str = None) -> DataFrame:
name = name or data.name
return data.to_frame(name=name)
# @to_df.register(SeriesGroupBy)
# def _(data: SeriesGroupBy, name: str = None) -> DataFrame:
# name = name or data.obj.name
# return data.obj.to_frame(name=name).groupby(data.grouper, dropna=False)
def check_column_uniqueness(df: DataFrame, msg: str = None) -> None:
"""Check if column names are unique of a dataframe"""
uniq = set()
for col in df.columns:
if col not in uniq:
uniq.add(col)
else:
msg = msg or "Name is not unique"
raise NameNonUniqueError(f"{msg}: {col}")
def dict_insert_at(
container: Mapping[str, Any],
poskeys: Sequence[str],
value: Mapping[str, Any],
remove: bool = False,
) -> Mapping[str, Any]:
"""Insert value to a certain position of a dict"""
ret_items = [] # type: List[Tuple[str, Any]]
ret_items_append = ret_items.append
matched = False
for key, val in container.items():
if key == poskeys[0]:
matched = True
if not remove:
ret_items_append((key, val))
ret_items.extend(value.items())
elif matched and key in poskeys:
if not remove:
ret_items_append((key, val))
elif matched and key not in poskeys:
matched = False
ret_items_append((key, val))
else:
ret_items_append((key, val))
return dict(ret_items)
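# Illustrative usage (not part of the original source):
#     dict_insert_at({"a": 1, "b": 2}, ["b"], {"x": 9})
#     # -> {"a": 1, "b": 2, "x": 9}
#     dict_insert_at({"a": 1, "b": 2}, ["b"], {"x": 9}, remove=True)
#     # -> {"a": 1, "x": 9}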
def name_mutatable_args(
*args: Union[Series, DataFrame, Mapping[str, Any]],
**kwargs: Any,
) -> Mapping[str, Any]:
"""Convert all mutatable arguments to named mappings, which can be easier
to mutate later on.
If there are Expression objects, keep them. So if an object has multiple
names and is built by an Expression, the name might get lost here.
Examples:
>>> s = Series([1], name='a')
>>> name_mutatable_args(s, b=2)
>>> # {'a': s, 'b': 2}
>>> df = DataFrame({'x': [3], 'y': [4]})
>>> name_mutatable_args(df)
>>> # {'x': [3], 'y': [4]}
>>> name_mutatable_args(d=df)
>>> # {'d$x': [3], 'd$y': [4]}
"""
# order kept
ret = {} # type: dict
for i, arg in enumerate(args):
if isinstance(arg, Series):
ret[arg.name] = arg
elif isinstance(arg, dict):
ret.update(arg)
elif isinstance(arg, DataFrame):
ret.update(arg.to_dict("series"))
elif isinstance(arg, Reference):
ret[arg._pipda_ref] = arg
else:
ret[f"{DEFAULT_COLUMN_PREFIX}{i}"] = arg
for key, val in kwargs.items():
if isinstance(val, DataFrame):
val = val.to_dict("series")
if isinstance(val, dict):
existing_keys = [
ret_key
for ret_key in ret
if ret_key == key or ret_key.startswith(f"{key}$")
]
if existing_keys:
ret = dict_insert_at( # type: ignore
ret, existing_keys, {key: val}, remove=True
)
else:
for dkey, dval in val.items():
ret[f"{key}${dkey}"] = dval
else:
ret[key] = val
return ret
def arg_match(
arg: Any, argname: str, values: Iterable[Any], errmsg: str = None
) -> Any:
"""Make sure arg is in one of the values.
Mimics `rlang::arg_match`.
"""
if not errmsg:
values = list(values)
errmsg = f"`{argname}` must be one of {values}."
if arg not in values:
raise ValueError(errmsg)
return arg
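# Illustrative usage (not part of the original source):
#     arg_match("inner", "how", ["inner", "outer"])   # -> "inner"
#     arg_match("cross", "how", ["inner", "outer"])   # -> raises ValueError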
def copy_attrs(df1: DataFrame, df2: DataFrame, deep: bool = True) -> None:
"""Copy attrs from df2 to df1"""
for key, val in df2.attrs.items():
if key.startswith("_"):
continue
df1.attrs[key] = deepcopy(val) if deep else val
def nargs(fun: Callable) -> int:
"""Get the number of arguments of a function"""
return len(inspect.signature(fun).parameters)
def position_at(
pos: int, length: int, base0: bool = None, raise_exc: bool = True
) -> int:
"""Get the 0-based position right at the given pos
When `pos` is negative, it acts like 0-based, meaning `-1` will anyway
represent the last position regardless of `base0`
Args:
pos: The given position
length: The length of the pool
base0: Whether the given `pos` is 0-based
raise_exc: Raise error if `pos` is out of range?
Returns:
The 0-based position
"""
from .collections import Collection
coll = Collection(pos, pool=length, base0=base0)
if raise_exc and coll.error:
raise coll.error
return coll[0]
def position_after(pos: int, length: int, base0: bool = None) -> int:
"""Get the 0-based position right at the given pos
Args:
pos: The given position
length: The length of the pool
Returns:
The position before the given position
"""
base0 = get_option("index.base.0", base0)
# after 0 with 1-based, should insert to first column
if not base0 and pos == 0:
return 0
return position_at(pos, length, base0) + 1
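# Illustrative usage (not part of the original source; result depends on the
# "index.base.0" option when base0 is not given):
#     position_after(0, 3, base0=False)   # -> 0 (insert before the first column)
#     position_after(1, 3, base0=False)   # -> 1 (insert right after the first column)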
def get_option(key: str, value: Any = None) -> Any:
"""Get the option with key.
This is for internal use mostly.
This is a shortcut for:
>>> if value is not None:
>>> return value
>>> from datar.base import get_option
>>> return get_option(key)
"""
if value is not None:
return value
from ..base import get_option as get_option_
return get_option_(key)
def apply_dtypes(
df: DataFrame, dtypes: Union[bool, Dtype, Mapping[str, Dtype]]
) -> None:
"""Apply dtypes to data frame"""
if dtypes is None or dtypes is False:
return
if dtypes is True:
inferred = df.convert_dtypes()
for col in df:
df[col] = inferred[col]
return
if not isinstance(dtypes, dict):
dtypes = dict(zip(df.columns, [dtypes] * df.shape[1])) # type: ignore
for column, dtype in dtypes.items():
if column in df:
df[column] = df[column].astype(dtype)
else:
for col in df:
if col.startswith(f"{column}$"):
df[col] = df[col].astype(dtype)
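# Illustrative usage (not part of the original source):
#     df = DataFrame({"x": ["1", "2"], "y$a": ["3", "4"]})
#     apply_dtypes(df, {"x": int, "y": int})  # casts "x" and the nested "y$a" in place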
def keep_column_order(df: DataFrame, order: Iterable[str]):
"""Keep the order of columns as given `order`
We cannot do `df[order]` directly, since `df` may have nested df columns.
"""
out_columns = []
for col in order:
if col in df:
out_columns.append(col)
else:
out_columns.extend(
(dfcol for dfcol in df.columns if dfcol.startswith(f"{col}$"))
)
if set(out_columns) != set(df.columns):
raise ValueError("Given `order` does not select all columns.")
return df[out_columns]
def reconstruct_tibble(
input: DataFrame,
output: DataFrame,
ungrouped_vars: List[str] = None,
keep_rowwise: bool = False,
) -> DataFrame:
"""Reconstruct the output dataframe based on input dataframe
Args:
input: The input data frame
output: The output data frame
ungrouped_vars: Variables to exclude from grouping
keep_rowwise: Whether rowwise structure should be kept
Return:
The reconstructed dataframe.
"""
from ..base import setdiff, intersect
from ..dplyr import group_vars, group_by_drop_default
from .grouped import DataFrameGroupBy, DataFrameRowwise
if ungrouped_vars is None:
ungrouped_vars = []
old_groups = group_vars(input, __calling_env=CallingEnvs.REGULAR)
new_groups = intersect(
setdiff(old_groups, ungrouped_vars, __calling_env=CallingEnvs.REGULAR),
output.columns,
__calling_env=CallingEnvs.REGULAR
)
if isinstance(input, DataFrameRowwise):
out = (
DataFrameRowwise(
output,
_group_vars=new_groups,
_group_drop=group_by_drop_default(input),
)
if keep_rowwise
else output
)
elif isinstance(input, DataFrameGroupBy) and len(new_groups) > 0:
out = DataFrameGroupBy(
output,
_group_vars=new_groups,
_group_drop=group_by_drop_default(input),
)
else:
out = output
copy_attrs(out, input)
return out
def df_getitem(df: DataFrame, ref: Any) -> Union[DataFrame, numpy.ndarray]:
"""Select columns from a data frame
If the column is a data frame, select that data frame.
"""
try:
return df[ref]
except KeyError:
cols = [col for col in df.columns if col.startswith(f"{ref}$")]
if not cols:
raise KeyError(ref) from None
ret = df.loc[:, cols]
ret.columns = [col[len(ref) + 1 :] for col in cols]
return ret
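# Illustrative usage (not part of the original source):
#     df = DataFrame({"a$x": [1], "a$y": [2]})
#     df_getitem(df, "a")   # -> DataFrame with columns ["x", "y"]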
def df_setitem(
df: DataFrame, name: str, value: Any, allow_dups: bool = False
) -> DataFrame:
"""Assign an item to a dataframe
Args:
df: The data frame
name: The name of the item
value: The value to insert
allow_dups: Allow duplicated names
Returns:
df itself or a merged df
"""
value = recycle_value(value, df.shape[0])
if isinstance(value, DataFrame):
# nested df
value.columns = [f"{name}${col}" for col in value.columns]
if allow_dups:
return pandas.concat([df, value], axis=1)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
""" test get/set & misc """
import pytest
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, MultiIndex,
Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestMisc(TestData):
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
@pytest.mark.parametrize(
'result_1, duplicate_item, expected_1',
[
[
pd.Series({1: 12, 2: [1, 2, 2, 3]}), pd.Series({1: 313}),
pd.Series({1: 12, }, dtype=object),
],
[
pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
pd.Series({1: [1, 2, 3]}), pd.Series({1: [1, 2, 3], }),
],
])
def test_getitem_with_duplicates_indices(
self, result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_series_box_timestamp(self):
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng)
assert isinstance(ser[5], pd.Timestamp)
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng, index=rng)
assert isinstance(ser[5], pd.Timestamp)
assert isinstance(ser.iat[5], pd.Timestamp)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
"""
Deprecated.
Sina enforces a rate limit on access frequency.
Many of Sina's summary tables are provided as lists and are fetched only once per day.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from datetime import date
from urllib.error import HTTPError
import pandas as pd
import requests
from bs4 import BeautifulSoup
import logbook
from toolz.itertoolz import partition_all
from ..setting.constants import QUOTE_COLS
from cnswd.utils import ensure_list
# from cnswd.data_proxy import DataProxy
from cnswd.websource.base import friendly_download, get_page_response
from .._exceptions import NoWebData, FrequentAccess
QUOTE_PATTERN = re.compile('"(.*)"')
NEWS_PATTERN = re.compile(r'\W+')
STOCK_CODE_PATTERN = re.compile(r'\d{6}')
SORT_PAT = re.compile(r'↑|↓')
DATA_BASE_URL = 'http://stock.finance.sina.com.cn/stock/go.php/'
MARGIN_COL_NAMES = [
'股票代码', '股票简称',
'融资余额', '融资买入额', '融资偿还额',
'融券余量金额', '融券余量', '融券卖出量', '融券偿还量', '融券余额'
]
INDEX_QUOTE_COLS = [
'指数简称', '最新价', '涨跌', '涨跌幅%', '成交量(万手)', '成交额(万元)'
]
logger = logbook.Logger('新浪网')
@friendly_download(10, 10, 10)
def fetch_company_info(stock_code):
"""获取公司基础信息"""
url_fmt = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/{}.phtml'
url = url_fmt.format(stock_code)
df = pd.read_html(url, attrs={'id': 'comInfo1'})[0]
return df
def fetch_issue_new_stock_info(stock_code):
"""获取发行新股信息"""
url_fmt = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vISSUE_NewStock/stockid/{}.phtml'
url = url_fmt.format(stock_code)
df = pd.read_html(url, attrs={'id': 'comInfo1'})[0]
return df
def _add_prefix(stock_code):
pre = stock_code[0]
if pre == '6':
return 'sh{}'.format(stock_code)
else:
return 'sz{}'.format(stock_code)
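# Illustrative usage (not part of the original source):
#     _add_prefix('600000')  # -> 'sh600000' (Shanghai)
#     _add_prefix('000001')  # -> 'sz000001' (Shenzhen)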
def _to_dataframe(content, p_codes):
"""解析网页数据,返回DataFrame对象"""
res = [x.split(',') for x in re.findall(QUOTE_PATTERN, content)]
df = pd.DataFrame(res).iloc[:, :32]
df.columns = QUOTE_COLS[1:]
df.insert(0, '股票代码', p_codes)
# df['股票代码'] = p_codes
df.dropna(inplace=True)
return df
def fetch_quotes(stock_codes):
"""
Fetch real-time quotes for a list of stocks
Parameters
----------
stock_codes : list
List of stock codes
Returns
-------
res : DataFrame
Number of rows = len(stock_codes)
33 columns
Example
-------
>>> df = fetch_quotes(['000001','000002'])
>>> df.iloc[:,:8]
股票代码 股票简称 开盘 前收盘 现价 最高 最低 竞买价
0 000001 平安银行 11.040 11.050 10.900 11.050 10.880 10.900
1 000002 万 科A 33.700 34.160 33.290 33.990 33.170 33.290
"""
stock_codes = ensure_list(stock_codes)
num = len(stock_codes)
length = 800
url_fmt = 'http://hq.sinajs.cn/list={}'
dfs = []
for p_codes in partition_all(length, stock_codes):
# p_codes = stock_codes[i * length:(i + 1) * length]
url = url_fmt.format(','.join(map(_add_prefix, p_codes)))
content = get_page_response(url).text
dfs.append(_to_dataframe(content, p_codes))
return pd.concat(dfs).sort_values('股票代码')
def _add_index_prefix(code):
pre = code[0]
if pre == '0':
return 's_sh{}'.format(code)
else:
return 's_sz{}'.format(code)
def _to_index_dataframe(content, p_codes):
"""解析网页数据,返回DataFrame对象"""
res = [x.split(',') for x in re.findall(QUOTE_PATTERN, content)]
df = pd.DataFrame(res)
df.columns = INDEX_QUOTE_COLS
df.insert(0, '指数代码', p_codes)
df['成交时间'] = pd.Timestamp.now().round('T')
df.dropna(inplace=True)
return df
def fetch_index_quotes(codes):
"""
Fetch real-time quotes for a list of indexes
Parameters
----------
codes : list
List of index codes
Returns
-------
res : DataFrame
Number of rows = len(codes)
8 columns
Example
-------
>>> df = fetch_index_quotes(['000001','000002'])
>>> df.iloc[:,:8]
股票代码 股票简称 开盘 前收盘 现价 最高 最低 竞买价
0 000001 平安银行 11.040 11.050 10.900 11.050 10.880 10.900
1 000002 万 科A 33.700 34.160 33.290 33.990 33.170 33.290
"""
codes = ensure_list(codes)
length = 800
url_fmt = 'http://hq.sinajs.cn/list={}'
dfs = []
for p_codes in partition_all(length, codes):
url = url_fmt.format(','.join(map(_add_index_prefix, p_codes)))
content = get_page_response(url).text
dfs.append(_to_index_dataframe(content, p_codes))
return pd.concat(dfs).sort_values('指数代码')
# Not usable
def fetch_globalnews():
"""获取24*7全球财经新闻"""
url = 'http://live.sina.com.cn/zt/f/v/finance/globalnews1'
response = requests.get(url)
today = date.today()
soup = BeautifulSoup(response.content, "lxml")
# Timestamps
stamps = [p.string for p in soup.find_all("p", class_="bd_i_time_c")]
# Titles
titles = [p.string for p in soup.find_all("p", class_="bd_i_txt_c")]
# Categories
categories = [
re.sub(NEWS_PATTERN, '', p.string)
for p in soup.find_all("p", class_="bd_i_tags")
]
# Class names: bd_i bd_i_og clearfix
data_mid = ['{} {}'.format(str(today), t) for t in stamps]
return stamps, titles, categories, data_mid
@friendly_download(10, 10, 2)
def fetch_cjmx(stock_code, date_):
"""
Download trade-by-trade details for the given stock code on the given date
Parameters
----------
stock_code : str
Stock code (6-digit numeric code)
date_ : date-like
A valid date string or a date object
Returns
-------
res : DataFrame
Exception
---------
Raises NoWebData when no data exists for the given date
Raises FrequentAccess when requests are too frequent and the site temporarily blocks access
Example
-------
>>> df = fetch_cjmx('300002','2016-6-1')
>>> df.head()
成交时间 成交价 价格变动 成交量(手) 成交额(元) 性质
0 15:00:03 8.69 NaN 1901 1652438 卖盘
1 14:57:03 8.69 -0.01 10 8690 卖盘
2 14:56:57 8.70 NaN 102 88740 买盘
3 14:56:51 8.70 NaN 15 13049 买盘
4 14:56:48 8.70 0.01 2 1739 买盘
"""
dfs = []
url = 'http://vip.stock.finance.sina.com.cn/quotes_service/view/vMS_tradehistory.php'
code_str = _add_prefix(stock_code)
date_str = pd.Timestamp(date_).strftime(r'%Y-%m-%d')
params = {'symbol': code_str, 'date': date_str, 'page': 1}
# A single day's trade data cannot exceed 1000 pages
for i in range(1, 1000):
params['page'] = i
r = requests.get(url, params=params)
r.encoding = 'gb18030'
df = pd.read_html(r.text, attrs={'id': 'datatbl'}, na_values=['--'])[0]
if '没有交易数据' in df.iat[0, 0]:
df = pd.DataFrame()
break
dfs.append(df)
res = pd.concat(dfs)
if len(res) == 0:
raise NoWebData('无法在新浪网获取成交明细数据。股票:{},日期:{}'.format(
code_str, date_str))
return res
@friendly_download(10, 10, 1)
def _common_fun(url, pages, header=0, verbose=False):
"""处理新浪数据中心网页数据通用函数"""
dfs = []
def sina_read_fun(x):
return pd.read_html(
x,
header=header,
na_values=['--'],
flavor='html5lib',
attrs={'class': 'list_table'})[0]
for i in range(1, pages + 1):
page_url = url + 'p={}'.format(i)
if verbose:
logger.info('第{}页'.format(i))
df = sina_read_fun(page_url)
dfs.append(df)
return pd.concat(dfs, ignore_index=True)
"""
Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime
import seaborn as sns
from scipy.stats import pearsonr
from matplotlib import cm as cm
from statsmodels.tsa.stattools import adfuller
import warnings
################Stationary check##################################################################################################################################
def plot_graph (timeseries, unit, title):
rolmean = pd.rolling_mean(timeseries, window=720)
rolstd = pd.rolling_std(timeseries, window=12)
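# Minimal sketch (not part of the original file) of how the imported adfuller
# is typically applied for the stationarity check referenced above:
def adf_check(timeseries):
    # Augmented Dickey-Fuller test: null hypothesis = the series has a unit root
    # (i.e. is non-stationary); reject when the p-value < 0.05.
    result = adfuller(timeseries.dropna())
    print('ADF statistic: %f' % result[0])
    print('p-value: %f' % result[1])
    for key, value in result[4].items():
        print('Critical value (%s): %f' % (key, value))
    return result[1] < 0.05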
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.utils import to_categorical
from keras.models import load_model
from sklearn.metrics import confusion_matrix
from keras import optimizers
FILENAME = 'dwt.csv'
SEED = 200
def save_model(model, path,filename):
filename = os.path.join(path,filename + '.h5')
model.save(filename)
def encode_text_index(df, name):
"""
Label encoding using sklearn.preprocessing. Transforms labels into integers, e.g. [a, b, c] => [0, 1, 2]
df: pandas.DataFrame
name: string
"""
le = preprocessing.LabelEncoder()
df[name] = le.fit_transform(df[name])
return le.classes_
def prepare_data(path: str):
# Reads data from file, and splits it into training and testing data
dataset = pd.read_csv(FILENAME, sep=',', decimal=',')
print("The last column is {0}".format(dataset.columns[-1]))
last_column_name = dataset.columns[-1]
x_data, y_data = to_xy(dataset, last_column_name)
return train_test_split(x_data,y_data,test_size=0.25,random_state=47)
def get_data_without_encoding(path: str):
# Reads data from file, and splits it into training and testing data
dataset = pd.read_csv(FILENAME, sep=',', decimal=',')
print("The last column is {0}".format(dataset.columns[-1]))
last_column_name = dataset.columns[-1]
# x_data, y_data = to_xy(dataset, last_column_name)
# trainX, x_test, trainY, y_test = train_test_split(x_data,y_data,test_size=0.25,random_state=47)
return dataset.to_numpy()[:,0 :dataset.shape[1] - 1], dataset.to_numpy()[:,-1]
def train_with_cross_validation(model_function,X, y, epochs=10, cv=3, batch_size=16):
cross_validation_model = KerasClassifier(build_fn=model_function, epochs=epochs, batch_size=batch_size, verbose=1)
print(cross_val_score(cross_validation_model, X, y, cv=cv))
def fit_model(model, trainX, trainY,batch_size=16, epochs=10, validation_split=0.20, k_fold=3):
Adam=optimizers.Adam(lr=0.1, beta_1=0.9, beta_2=0.99, epsilon=1e-08, decay=0.0, amsgrad=False)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['acc'])
scores=model.fit(trainX, trainY, epochs=epochs,batch_size=batch_size,verbose=0, validation_split=validation_split)
return model
def evalute_model(model, testX, testY, run_ensamble=True):
print("EVALUATING MODEL: {0}".format(model.name))
if(run_ensamble):
Adam=optimizers.Adam(lr=0.5, beta_1=0.9, beta_2=0.999, epsilon=1e-06, decay=0.0, amsgrad=False)
model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['mae','acc'])
scores = model.evaluate(testX, testY, verbose = 2)
print(model.metrics_names)
return scores
def generate_fold(X, Y, k=3):
folds = list(KFold(n_splits=k, shuffle=True, random_state=1).split(X, Y))
return folds
def get_models(folder=''):
if(folder == ''):
folder = '.'
else:
folder = folder + '/'
models = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f)) and f.endswith(".h5")]
return models
def load_models(models, path="models"):
changed_models = []
for i in range(len(models)):
model=load_model(os.path.join(path,models[i]))
import pdb; pdb.set_trace()
changed_models.append(model)
return changed_models
def encode_text_index(df, name):
le = preprocessing.LabelEncoder()
df[name] = le.fit_transform(df[name])
return le.classes_
# Transform data to fit the format acceptable by Keras model
def to_xy(df, target):
result = []
for x in df.columns:
if x != target:
result.append(x)
# find out the type of the target column. Is it really this hard? :(
target_type = df[target].dtypes
target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
# Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
if target_type in (np.int64, np.int32):
# Classification
dummies = pd.get_dummies(df[target])
#!/usr/bin/env python3
import argparse
import sys
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import altair as alt
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--application', help='Plot only the given application')
parser.add_argument('data', help='The data to plot, in CSV or DataFrame pickle format')
return parser.parse_args()
# Plots application `appname`
def plot(results, appname):
appdata = results[results.application == appname]
if len(appdata) == 0:
print(f'No data to plot for {appname}.')
return
if appdata[appdata.svewidth == 0].groupby('version').sum()['count'].max() >= 1e9:
scale = 'billion'
appdata.loc[:, 'count'] /= 1e9
else:
scale = 'million'
appdata.loc[:, 'count'] /= 1e6
fname = f'opcount-{appname}-all-clustered-stacked-group.png'
alt.Chart(appdata).mark_bar().encode(x=alt.X('version', title='', axis=alt.Axis(labelAngle=-30)),
y=alt.Y('sum(count)', title=f'Dynamic execution count ({scale} instructions)'),
column='svewidth',
color=alt.Color('optype', title='Op Group', scale=alt.Scale(scheme='set2')))\
.configure(background='white')\
.configure_title(anchor='middle', fontSize=14)\
.properties(title=appname)\
.save(fname, scale_factor='2.0')
print(f'Saved plot for {appname} in {fname}.')
def main():
args = parse_args()
if args.data.endswith('csv'):
df = pd.read_csv(args.data)
else:
df = pd.read_pickle(args.data)
df['svewidth'] = pd.to_numeric(df.svewidth)
import numpy as np
import pandas as pd
from pandas import read_csv
import matplotlib.pyplot as plt
#%matplotlib inline
#%%
attrib = read_csv('attributes.csv', delim_whitespace = True)
data = read_csv('communities.data', names = attrib['attributes'])
print(data.shape)
#%%
data.head()
#%%
'''
Remove non-predictive features
state: US state (by number) - not counted as predictive above, but if considered, should be considered nominal (nominal)
county: numeric code for county - not predictive, and many missing values (numeric)
community: numeric code for community - not predictive and many missing values (numeric)
communityname: community name - not predictive - for information only (string)
fold: fold number for non-random 10 fold cross validation, potentially useful for debugging, paired tests - not predictive (numeric)
'''
data = data.drop(columns=['state','county',
'community','communityname',
'fold'], axis=1)
#%%
data.head()
#%%
'''
Remove column with NA
Some of the features contained many missing values as some surveys were not conducted in some communities,
so they were removed from the data:
'OtherPerCap', 'LemasSwornFT', 'LemasSwFTPerPop', 'LemasSwFTFieldOps',
'LemasSwFTFieldPerPop', 'LemasTotalReq', 'LemasTotReqPerPop', 'PolicReqPerOffic', 'PolicPerPop',
'RacialMatchCommPol', 'PctPolicWhite', 'PctPolicBlack', 'PctPolicHisp', 'PctPolicAsian', 'PctPolicMinor',
'OfficAssgnDrugUnits', 'NumKindsDrugsSeiz', 'PolicAveOTWorked', 'PolicCars', 'PolicOperBudg', 'LemasPctPolicOnPatr',
'LemasGangUnitDeploy', 'PolicBudgPerPop'
'''
from pandas import DataFrame
data = data.replace('?', np.nan)
feat_miss = data.columns[data.isnull().any()]
print(feat_miss)
data = data.drop(columns=list(feat_miss), axis=1)
#%%
print(data.shape)
data.head()
#%%
data.describe()
#%%
# ViolentCrimesPerPop: total number of violent crimes per 100K popuation (numeric - decimal)
# GOAL attribute (to be predicted)
data.hist(column = ['ViolentCrimesPerPop'], bins = 30, color = 'red', alpha = 0.8)
plt.show()
#%%
# TODO Correlations
import seaborn as sns
corrmat = data.corr()
fig = plt.figure(figsize = (16, 12))
sns.heatmap(corrmat, vmax = 0.8)
plt.show()
#%%
corrT = data.corr(method = 'pearson').round(4)
corrT = corrT.sort_values(by=['ViolentCrimesPerPop'])
corrT_VCPP = corrT['ViolentCrimesPerPop']
#%%
'''
Remove Multicollinearity
set VIF = 5, R^2 = 0.8 to remove attributes
'''
'''Dimensionality Reduction - Principal Component Analysis (PCA)
The dataset contain many variables highly
correlated. Multicolinearity will increase the model variance. Dimensionality reduction utilizing PCA can provide an
optimal set of orthogonal features. Let's adopt the criterion in which we select those principal components
responsible to explain more than a unit variance ("eigenvalue one criterion"). '''
X_DF = data.iloc[:, 0:99]
# data.to_csv("data_removed.csv")
# Detecting Multicollinearity using VIF
from statsmodels.stats.outliers_influence import variance_inflation_factor
def calc_vif(X_DF):
# X_DF = pd.DataFrame(X)
# Calculating VIF
vif = pd.DataFrame()
vif["variables"] = X_DF.columns
vif["VIF"] = [variance_inflation_factor(X_DF.values, i) for i in range(X_DF.shape[1])]
return(vif)
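# Note (added for clarity, not in the original analysis): a common rule of thumb
# is that VIF > 5-10 indicates problematic multicollinearity; the cell below
# drops columns with VIF > 30.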
VIF = calc_vif(X_DF)
#%%
data_to_dump = VIF.where(VIF['VIF'] > 30)
data_to_dump = data_to_dump.dropna(how='any')
columns_to_dump = list(data_to_dump.iloc[:, 0])
X_DF = data.drop(columns=columns_to_dump, axis=1)
#%%
# VIF_2 = calc_vif(X_DF)
'''
Now we have two racePct*** remain, consider corrT_VCPP['racePctAsian'] = 0.0376, corrT_VCPP['racePctHisp'] = 0.2931,
which means racePctAsian is not very related to ViolentCrimesPerPop, so to simplify
the model, we only keep racePctWhite as our sensitive variable.
'''
X_DF = X_DF.drop(columns=['racePctAsian', 'racePctHisp'], axis=1)
print("Removed columns(", len(columns_to_dump) + 2, "):\n", (columns_to_dump + ['racePctAsian', 'racePctHisp']))
#%%
from sklearn.model_selection import train_test_split
X = X_DF.values
y = data.iloc[:, 99].values
seed = 0
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = seed)
print(X.shape)
print(y.shape)
#%%
from sklearn.preprocessing import StandardScaler
# Standardize features by removing the mean and scaling to unit variance
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#%%
# Perform PCA
# from sklearn.decomposition import PCA
#
# c = 14
# pca = PCA(n_components = c)
# X_train = pca.fit_transform(X_train)
# X_test = pca.transform(X_test)
#
# print("Amount of variance: %s" % pca.explained_variance_)
# print("Sum of the variance: %s" % sum(pca.explained_variance_).round(2))
#
# print("Percentage of variance: %s" % pca.explained_variance_ratio_)
# print("Sum of the percentage of variance: %s" % sum(pca.explained_variance_ratio_).round(2))
#
#
# plt.scatter(np.arange(1,(c+1)),pca.explained_variance_, c = 'red')
# plt.plot((0,15), (1,1), color = 'black', linestyle = 'dashed')
# plt.xlabel('PC')
# plt.ylabel('Amount of variance explained')
# plt.show()
# print(X_train.shape)
#%%
pd.DataFrame(X_train)
import sys
import configparser
import time
import re
import pandas as pd
from pandas import Series,DataFrame
# Collect passwords that may contain a date (those with a run of 4 or more consecutive digits)
def countProbPasswd(passwdList):
df = []
for i in range(len(passwdList)):
passwd = str(passwdList[i])
struc = ""
for ch in passwd:
if ch.isdigit():
struc += 'D'
elif ch.isalpha():
struc += 'L'
else:
struc += 'S'
char = struc[0]
c = 1
stri = struc[1:]
res = ''
for j in stri:
if j == char:
c += 1
else:
res += char
res += str(c)
char = j
c = 1
res += char
res += str(c)
#r'D[4-9]|D\d{2}'
if re.search(r'D[4-9]|D\d{2}', res):
df.append(passwd)
return df
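# Illustrative usage (not part of the original source): only passwords with a
# run of 4 or more consecutive digits are kept, e.g.
#     countProbPasswd(['alice1987', 'pass12'])  # -> ['alice1987']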
# Count passwords containing numeric dates - Yahoo
# Matching passwords are written under the date_passwd/Yahoo directory
def analysisDate_Yahoo(data):
lis1 = []
lis2 = []
lis3 = []
lis4 = []
lis5 = []
lis6 = []
lis7 = []
lis8 = []
lis9 = []
datePasswd = {'yyyy':0,'yyyymm':0,'yyyymmdd':0,'mmddyyyy':0,'ddmmyyyy':0,'yymmdd':0,'mmddyy':0,'ddmmyy':0,'mmdd':0}
for i in data:
# 密码判断条件由长到短,符合多种条件的密码只归类于先进行判断的条件
# 例如19800205,归类于yyyy-mm-dd而不是yyyy
#yyyy-mm-dd
if re.search(r'(19\d{2}|20\d{2})(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])',i):
datePasswd['yyyymmdd'] += 1
lis3.append(i)
continue
#mm-dd-yyyy
if re.search(r'(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])(19\d{2}|20\d{2})',i):
datePasswd['mmddyyyy'] += 1
lis4.append(i)
continue
#dd-mm-yyyy
if re.search(r'(0[1-9]|[1-2][0-9]|3[0-1])(0[1-9]|1[0-2])(19\d{2}|20\d{2})',i):
datePasswd['ddmmyyyy'] += 1
lis5.append(i)
continue
#yy-mm-dd
if re.search(r'[0-9][0-9](0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])',i):
datePasswd['yymmdd'] += 1
lis6.append(i)
continue
#mm-dd-yy
if re.search(r'(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])[0-9][0-9]',i):
datePasswd['mmddyy'] += 1
lis7.append(i)
continue
#dd-mm-yy
if re.search(r'(0[1-9]|[1-2][0-9]|3[0-1])(0[1-9]|1[0-2])[0-9][0-9]',i):
datePasswd['ddmmyy'] += 1
lis8.append(i)
continue
#yyyy-mm
if re.search(r'(19\d{2}|20\d{2})(0[1-9]|1[0-2])',i):
datePasswd['yyyymm'] += 1
lis2.append(i)
continue
#yyyy 1900-2100
if re.search(r'19\d{2}|20\d{2}',i):
datePasswd['yyyy'] += 1
lis1.append(i)
continue
#mm-dd
if re.search(r'(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])',i):
datePasswd['mmdd'] += 1
lis9.append(i)
continue
pd.Series(lis1).to_csv('date_passwd/Yahoo/yyyy.csv')
pd.Series(lis2).to_csv('date_passwd/Yahoo/yyyymm.csv')
pd.Series(lis3).to_csv('date_passwd/Yahoo/yyyymmdd.csv')
pd.Series(lis4).to_csv('date_passwd/Yahoo/mmddyyyy.csv')
pd.Series(lis5).to_csv('date_passwd/Yahoo/ddmmyyyy.csv')
pd.Series(lis6).to_csv('date_passwd/Yahoo/yymmdd.csv')
pd.Series(lis7).to_csv('date_passwd/Yahoo/mmddyy.csv')
pd.Series(lis8).to_csv('date_passwd/Yahoo/ddmmyy.csv')
pd.Series(lis9).to_csv('date_passwd/Yahoo/mmdd.csv')
print('-------------Date passwd in Yahoo------------------')
print(datePasswd)
print('--------------------------------------------------')
# Count passwords containing numeric dates - CSDN
def analysisDate_csdn(data):
lis1 = []
lis2 = []
lis3 = []
lis4 = []
lis5 = []
lis6 = []
lis7 = []
lis8 = []
lis9 = []
datePasswd = {'yyyy':0,'yyyymm':0,'yyyymmdd':0,'mmddyyyy':0,'ddmmyyyy':0,'yymmdd':0,'mmddyy':0,'ddmmyy':0,'mmdd':0}
for i in data:
#yyyy-mm-dd
if re.search(r'(19\d{2}|20\d{2})(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])',i):
datePasswd['yyyymmdd'] += 1
lis3.append(i)
continue
#mm-dd-yyyy
if re.search(r'(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])(19\d{2}|20\d{2})',i):
datePasswd['mmddyyyy'] += 1
lis4.append(i)
continue
#dd-mm-yyyy
if re.search(r'(0[1-9]|[1-2][0-9]|3[0-1])(0[1-9]|1[0-2])(19\d{2}|20\d{2})',i):
datePasswd['ddmmyyyy'] += 1
lis5.append(i)
continue
#yy-mm-dd
if re.search(r'[0-9][0-9](0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])',i):
datePasswd['yymmdd'] += 1
lis6.append(i)
continue
#mm-dd-yy
if re.search(r'(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])[0-9][0-9]',i):
datePasswd['mmddyy'] += 1
lis7.append(i)
continue
#dd-mm-yy
if re.search(r'(0[1-9]|[1-2][0-9]|3[0-1])(0[1-9]|1[0-2])[0-9][0-9]',i):
datePasswd['ddmmyy'] += 1
lis8.append(i)
continue
#yyyy-mm
if re.search(r'(19\d{2}|20\d{2})(0[1-9]|1[0-2])',i):
datePasswd['yyyymm'] += 1
lis2.append(i)
continue
#yyyy 1900-2100
if re.search(r'19\d{2}|20\d{2}',i):
datePasswd['yyyy'] += 1
lis1.append(i)
continue
#mm-dd
if re.search(r'(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])',i):
datePasswd['mmdd'] += 1
lis9.append(i)
continue
pd.Series(lis1).to_csv('date_passwd/csdn/yyyy.csv')
pd.Series(lis2).to_csv('date_passwd/csdn/yyyymm.csv')