prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
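Each row below pairs a code prompt (truncated just before a masked pandas call) with its completion (the masked call itself) and its api (the fully qualified pandas name). In this dump the three fields run together inline, so a sample ends in the form `<prompt tail> = | <completion> | <api> |`; the first sample below, for instance, ends with `barcodes = | pd.read_csv(path, sep=sep) | pandas.read_csv |`.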
import logging
import os
import re
import warnings
# import numpy as np
from os import listdir
from os.path import isfile, join
import numpy as np
import pandas as pd
import sqlalchemy as sa
# from odo import odo
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import sessionmaker
import spherpro.bro as bro
import spherpro.bromodules.io_anndata as io_anndata
import spherpro.configuration as config
import spherpro.db as db
import spherpro.library as lib
DICT_DB_KEYS = {
"image_id": db.images.image_id.key,
"object_number": db.objects.object_number.key,
"measurement_type": db.measurement_types.measurement_type.key,
"measurement_name": db.measurement_names.measurement_name.key,
"stack_name": db.stacks.stack_name.key,
"plane_id": db.ref_planes.ref_plane_number.key,
"object_id": db.objects.object_id.key,
}
OBJECTS_STACKNAME = "ObjectStack"
OBJECTS_CHANNELNAME = "object"
OBJECTS_PLANEID = "1"
OBJECTS_CHANNELTYPE = "object"
READONLY = "_readonly"
class DataStore(object):
"""DataStore
The DataStore class is intended to be used as a storage for spheroid IMC
data.
Methods:
Base:
read_config: read configfile
import_data: reads and writes data to the database
resume_data: reads non-database files and configures backend
"""
def __init__(self):
# init empty properties here
self.experiment_layout = None
self.barcode_key = None
self.well_measurements = None
self.cut_meta = None
self.roi_meta = None
self.channel_meta = None
self.sphere_meta = None
self.measurement_meta_cache = None
self._pannel = None
self._session = None
self._session_maker = None
self.connectors = {
config.CON_SQLITE: db.connect_sqlite,
config.CON_SQLITE + READONLY: db.connect_sqlite_ro,
config.CON_MYSQL: db.connect_mysql,
config.CON_POSTGRESQL: db.connect_postgresql,
}
#########################################################################
#########################################################################
# Import or Resume functions: #
#########################################################################
#########################################################################
def read_config(self, configpath):
"""
Reads the configuration from the given config file path and stores it on the instance.
Args:
configpath: A string denoting the location of the config file
Raises:
YAMLError
"""
self.conf = config.read_configuration(configpath)
def import_data(self, minimal=None):
"""read_data
Reads the Data using the file locations given in the configfile.
Args:
minimal: Bool, if True, the import process only imports values from
the RefStacks and no location values
"""
if minimal is None:
minimal = False
# Read the data based on the config
self._read_experiment_layout()
self._read_barcode_key()
# self._read_measurement_data()
self._read_image_data()
self._read_relation_data()
self._read_stack_meta()
self._populate_db(minimal)
def resume_data(self, readonly=False):
"""read_data
Reads non-database files and configures backend according to
the configfile.
"""
# Read the data based on the config
# self._read_experiment_layout()
# self._read_barcode_key()
# self._readWellMeasurements()
# self._read_cut_meta()
# self._read_roi_meta()
# self._read_measurement_data()
# self._read_stack_meta()
self._read_pannel()
backend = self.conf[config.BACKEND]
if readonly:
backend += READONLY
self.db_conn = self.connectors[backend](self.conf)
self.bro = bro.Bro(self)
def drop_all(self):
self.db_conn = self.connectors[self.conf[config.BACKEND]](self.conf)
db.drop_all(self.db_conn)
##########################################
# Helper functions used by readData: #
##########################################
def _read_experiment_layout(self):
"""
reads the experiment layout as stated in the config
and saves it in the datastore
"""
if self.conf[config.LAYOUT_CSV][config.PATH] is not None:
sep = self.conf[config.LAYOUT_CSV][config.SEP]
experiment_layout = pd.read_csv(
self.conf[config.LAYOUT_CSV][config.PATH], sep=sep
)
# rename the columns
rename_dict = {
self.conf[config.LAYOUT_CSV][c]: target
for c, target in [
(config.LAYOUT_CSV_COND_ID, db.conditions.condition_id.key),
(config.LAYOUT_CSV_COND_NAME, db.conditions.condition_name.key),
(config.LAYOUT_CSV_TIMEPOINT_NAME, db.conditions.time_point.key),
(config.LAYOUT_CSV_BARCODE, db.conditions.barcode.key),
(
config.LAYOUT_CSV_CONCENTRATION_NAME,
db.conditions.concentration.key,
),
(config.LAYOUT_CSV_BC_PLATE_NAME, db.conditions.bc_plate.key),
(config.LAYOUT_CSV_PLATE_NAME, db.conditions.plate_id.key),
(config.LAYOUT_CSV_WELL_NAME, db.conditions.well_name.key),
]
}
experiment_layout = experiment_layout.rename(columns=rename_dict)
self.experiment_layout = experiment_layout.fillna(0)
else:
self.experiment_layout = None
def _read_barcode_key(self):
"""
reads the barcode key as stated in the config
"""
conf_bc = self.conf[config.BARCODE_CSV]
conf_layout = self.conf[config.LAYOUT_CSV]
path = conf_bc[config.PATH]
if path is not None:
# Load the barcode key
sep = conf_bc[config.SEP]
barcodes = | pd.read_csv(path, sep=sep) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
synbioParts (c) University of Manchester 2019
synbioParts is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
Created on Fri May 31 13:38:19 2019
@author: <NAME>, SYNBIOCHEM
@description: Routines to interface with synbio repositories
- parts: list of pointers to SynBiohub, we assume that each part
(origin, resistance, promoter) has been already registered in the part registry;
- pathway: list of enzymes referred by either:
- SynbioHub identifier: useful if the part has been previously registered or
for gene variants (RBS library or mutants)
- UniProt identifier: a new empty container for this part is created in SynBioHub,
which will be later filled by the DNA design step
"""
import os
import re
import sbol2 as sbol
import requests
import time
import numpy as np
import pandas as pd
from .OptDes import getDoe
from .Args import (
DEFAULT_libsize,
DEFAULT_get_sequences,
DEFAULT_backtranslate,
DEFAULT_condon_table,
)
from logging import (
Logger,
getLogger,
DEBUG
)
logger = getLogger(__name__)
# logger.setLevel(DEBUG)
def doeSBOL(pfile='RefParts.csv', gfile='GeneParts.csv', libsize=32, ofile='out.sbol'):
"""
Perform the DoE and generate the SBOL file from the
parts and genes files
- RefParts.csv: Name, Type, Part
- GeneParts.csv: Name, Type, Part, Step
Type: origin, resistance, promoter, terminator, gene
Step: Enzyme step in the pathway (eventually could be implemented
for the other genetic parts)
"""
diagnostics, cons = getTheDoe(pfile,gfile,libsize)
doc = getSBOL(pfile,gfile,cons)
doc.write(ofile)
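# Hedged usage sketch (the file names are the documented defaults; getTheDoe and
# getSBOL are assumed to be defined elsewhere in this module):
# doeSBOL(pfile='RefParts.csv', gfile='GeneParts.csv', libsize=32, ofile='out.sbol')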
def doeGetSBOL(
gfile,
pfile='RefParts.csv',
gsbol=None,
libsize=DEFAULT_libsize,
getSequences=DEFAULT_get_sequences,
backtranslate=DEFAULT_backtranslate,
codontable=DEFAULT_condon_table
):
"""
Perform the DoE and generate the SBOL file from the
parts and genes files
- RefParts.csv: Name, Type, Part
Name: part name
Type: origin, resistance, promoter, terminator, gene
Part: SynBioHub URI
- GeneParts.csv: Name, Type, Part, Step, Sequence
Name: gene name
Type: gene
Step: Enzyme step in the pathway (eventually could be implemented
for the other genetic parts)
Part: identifier (UniProt, etc) or SynBioHub URI
Sequence: gene sequence (optional: if not given they are retrieved
from UniProt or SynBioHub)
- Gsbol: SBOL file containing optimised versions of the genes (RBS, etc)
"""
parts = pd.read_csv(pfile)
genes = pd.read_csv(gfile)
if gsbol is not None and os.path.exists(gsbol):
genes = _readGenesSBOL(gsbol, genes)
diagnostics, cons = getTheDoe(parts,genes,libsize)
doc = getSBOL(parts,genes,cons,getSequences,backtranslate,codontable, gsbol)
diagnostics['sbol'] = doc.writeString()
return diagnostics
def _readGenesSBOL(gsbol, genes, roles=[sbol.SO_GENE]):
""" Create a new gene table containing the genes in the SBOL """
partsdoc = sbol.Document()
partsdoc.read(gsbol)
gdict = {}
for i in genes.index:
gid = genes.loc[i,'Part']
if gid not in gdict:
gdict[gid] = []
gdict[gid].append( genes.loc[i,:] )
ngenes = []
for part in partsdoc.componentDefinitions:
if len( set(part.roles) & set(roles) ) > 0:
dispid = part.displayId
disp = dispid.split('_')
rbs = disp[-2]
cds = '_'.join( disp[0:(len(disp)-2)] )
if cds in gdict:
for row in gdict[cds]:
row = row.copy()
row.loc['Name'] = dispid
row.loc['Part'] = part.persistentIdentity
ngenes.append( row )
ngenes = | pd.DataFrame(ngenes, columns=genes.columns) | pandas.DataFrame |
"""
The Transform components allow transforming tables in arbitrary ways.
"""
import datetime as dt
import numpy as np
import pandas as pd
import panel as pn
import param
from ..util import resolve_module_reference
class Transform(param.Parameterized):
"""
A Transform provides the ability to transform a table supplied by
a Source.
"""
controls = param.List(default=[], doc="""
Parameters that should be exposed as widgets in the UI.""")
transform_type = None
_field_params = []
__abstract = True
@classmethod
def _get_type(cls, transform_type):
if '.' in transform_type:
return resolve_module_reference(transform_type, Transform)
try:
__import__(f'lumen.transforms.{transform_type}')
except Exception:
pass
for transform in param.concrete_descendents(cls).values():
if transform.transform_type == transform_type:
return transform
raise ValueError(f"No Transform for transform_type '{transform_type}' could be found.")
@classmethod
def from_spec(cls, spec):
"""
Resolves a Transform specification.
Parameters
----------
spec: dict
Specification declared as a dictionary of parameter values.
Returns
-------
The resolved Transform object.
"""
spec = dict(spec)
transform_type = Transform._get_type(spec.pop('type', None))
new_spec = {}
for k, v in spec.items():
if (k in transform_type.param and
isinstance(transform_type.param[k], param.ListSelector) and
not isinstance(v, list)):
v = [v]
new_spec[k] = v
return transform_type(**new_spec)
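# Hedged example of resolving a transform from a declarative spec; 'history' is the
# transform_type registered by HistoryTransform further below, the values are illustrative.
# spec = {'type': 'history', 'length': 20, 'date_column': 'timestamp'}
# transform = Transform.from_spec(spec)
# transformed = transform.apply(table)  # `table` is any pandas DataFrame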
@classmethod
def apply_to(cls, table, **kwargs):
"""
Calls the apply method based on keyword arguments passed to define transform.
Parameters
----------
table: `pandas.DataFrame`
Returns
-------
A DataFrame with the results of the transformation.
"""
return cls(**kwargs).apply(table)
def apply(self, table):
"""
Given a table transform it in some way and return it.
Parameters
----------
table : DataFrame
The queried table as a DataFrame.
Returns
-------
DataFrame
A DataFrame containing the transformed data.
"""
return table
@property
def control_panel(self):
return pn.Param(
self.param, parameters=self.controls, sizing_mode='stretch_width',
margin=(-10, 0, 5, 0)
)
class HistoryTransform(Transform):
"""
The HistoryTransform accumulates a history of the queried data in
a buffer up to the supplied length and (optionally) adds a
date_column to the data.
"""
date_column = param.Selector(doc="""
If defined adds a date column with the supplied name.""")
length = param.Integer(default=10, bounds=(1, None), doc="""
Accumulates a history of data.""")
transform_type = 'history'
_field_params = ['date_column']
def __init__(self, **params):
super().__init__(**params)
self._buffer = []
def apply(self, table):
"""
Accumulates a history of the data in a buffer up to the
declared `length` and optionally adds the current datetime to
the declared `date_column`.
Parameters
----------
data : DataFrame
The queried table as a DataFrame.
Returns
-------
DataFrame
A DataFrame containing the buffered history of the data.
"""
if self.date_column:
table = table.copy()
table[self.date_column] = dt.datetime.now()
self._buffer.append(table)
self._buffer[:] = self._buffer[-self.length:]
return | pd.concat(self._buffer) | pandas.concat |
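# Hedged usage sketch for HistoryTransform (the column name is hypothetical):
# hist = HistoryTransform(length=5, date_column='queried_at')
# latest = hist.apply(pd.DataFrame({'value': [1, 2, 3]}))
# # each call appends to the buffer and returns the concatenated history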
"""
Filters joint involvement data to treatment-naive patients diagnosed up to one
year after symptom onset who have joint involvement at baseline.
Patients must also not be withdrawn at any point from the study.
"""
import click
import feather
import pandas as pd
from logging import *
def load_data(basic_path: str,
medication_path: str,
joint_injection_path: str,
joint_path: str) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame,
pd.DataFrame):
"""
Loads data from the given paths.
Returns:
The loaded data.
"""
info('Reading basic information from {}'.format(basic_path))
basic_data = feather.read_dataframe(basic_path)
basic_data.info()
basic_data.set_index('subject_id', inplace=True)
info('Reading medications from {}'.format(medication_path))
medication_data = feather.read_dataframe(medication_path)
medication_data.info()
medication_data = medication_data.loc[medication_data['visit_id'] ==
1].set_index('subject_id')
info('Reading joint injections from {}'.format(joint_injection_path))
joint_injection_data = feather.read_dataframe(joint_injection_path)
joint_injection_data.info()
joint_injection_data = joint_injection_data.loc[joint_injection_data[
'visit_id'] == 1].set_index('subject_id')
info('Reading joint involvements from {}'.format(joint_path))
joint_data = feather.read_dataframe(joint_path)
joint_data.info()
return basic_data, medication_data, joint_injection_data, joint_data
def get_basic_masks(df: pd.DataFrame) -> pd.DataFrame:
"""
Obtains masks for the given basic data.
Args:
df: A table of basic information.
Returns:
The masks.
"""
mask_sex = pd.notnull(df['sex'])
info('{} patients had sex information'.format(mask_sex.sum()))
mask_dx = pd.notnull(df['diagnosis_6_months'])
info('{} patients had recorded diagnoses'.format(mask_dx.sum()))
mask_withdrawn = ~(df['withdrawn'])
info('{} patients were not withdrawn'.format(mask_withdrawn.sum()))
mask_symptom_onset_age = (df['symptom_onset_age'] > 0) & (
df['symptom_onset_age'] < 16)
info('{} patients were between 0 and 16 years of age at symptom onset'.
format(mask_symptom_onset_age.sum()))
mask_onset_to_diagnosis = (df['symptom_onset_to_diagnosis_days'] >= 0) & (
df['symptom_onset_to_diagnosis_days'] < 365)
info(('{} patients were diagnosed between 0 days and before one year '
'after symptom onset').format(mask_onset_to_diagnosis.sum()))
mask_basic = (mask_sex & mask_dx & mask_withdrawn &
mask_symptom_onset_age & mask_onset_to_diagnosis)
info('{} patients satisfied basic inclusion criteria'.format(
mask_basic.sum()))
return pd.DataFrame.from_items(
[('sex', mask_sex), ('diagnosis', mask_dx),
('withdrawn', mask_withdrawn),
('symptom_onset_age', mask_symptom_onset_age),
('onset_to_diagnosis', mask_onset_to_diagnosis),
('basic_combined', mask_basic)])
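# Compatibility note: pd.DataFrame.from_items was deprecated in pandas 0.23 and
# removed in 1.0; on newer pandas, pd.DataFrame(dict(...)) built from the same
# (name, mask) pairs is an equivalent construction.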
def get_medication_masks(medication_df: pd.DataFrame,
joint_injection_df: pd.DataFrame) -> pd.DataFrame:
"""
Obtains masks for the given medication data and joint injection data.
Args:
medication_df: A table of medications.
joint_injection_df: A table of joint injections.
Returns:
The masks.
"""
mask_dmards = medication_df['dmard_status'].isin(['NONE', 'NEW'])
info('{} patients were not previously on DMARDs'.format(mask_dmards.sum()))
mask_steroids = medication_df['steroid_status'].isin(['NONE', 'NEW'])
info('{} patients were not previously on steroids'.format(
mask_steroids.sum()))
mask_ivig = medication_df['ivig_status'].isin(['NONE', 'NEW'])
info('{} patients were not previously on IVIG'.format(mask_ivig.sum()))
mask_biologics = medication_df['biologic_status'].isin(['NONE', 'NEW'])
info('{} patients were not previously on biologics'.format(
mask_biologics.sum()))
mask_joint_injections = (
joint_injection_df['injection_status'] == 'NONE') | (
joint_injection_df['days_max'] < 0)
info('{} patients had no joint injections'.format(
mask_joint_injections.sum()))
mask_medications = (mask_dmards & mask_steroids & mask_ivig &
mask_biologics & mask_joint_injections)
info('{} patients satisfied medication requirements'.format(
mask_medications.sum()))
return pd.DataFrame.from_items(
[('dmards', mask_dmards), ('steroids', mask_steroids),
('ivig', mask_ivig), ('biologics', mask_biologics),
('joint_injections', mask_joint_injections),
('medications_combined', mask_medications)])
def get_joint_count_masks(df: pd.DataFrame) -> pd.DataFrame:
"""
Obtains a joint count mask.
Args:
df: A table of joint involvements.
Returns:
The mask.
"""
baseline_joint_counts = df.loc[df['visit_id'] == 1].drop(
'visit_id', axis=1).set_index('subject_id').sum(axis=1)
mask_joints = baseline_joint_counts > 0
info('{} patients had joints involved at baseline'.format(mask_joints.sum(
)))
return | pd.DataFrame.from_items([('joint_count', mask_joints)]) | pandas.DataFrame.from_items |
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module provides various model summary information.
This code is currently in "experimental" state, as we wait for the next release
of PyTorch with stable support for the JIT tracer functionality we employ in this
code (it was built with a 4.x master branch).
"""
import torch
import torchvision
from torch.autograd import Variable
import torch.jit as jit
import pandas as pd
from tabulate import tabulate
class SummaryGraph(object):
"""We use Pytorch's JIT tracer to run a forward pass and generate a trace graph, which
is an internal representation of the model. We then use ONNX to "clean" this
representation. After building a new representation of the graph, we can print
it to a table, a PNG, add information to nodes, etc.
The trace is a C++ component and the API is not documented, so we need to dig into some
Pytorch internals code to understand how to get the info we need.
https://github.com/pytorch/pytorch/blob/master/torch/onnx/__init__.py
https://github.com/pytorch/pytorch/blob/master/torch/onnx/symbolic.py
We think that using the trace output to generate a representation of the graph is
the best method available in Pytorch, due to the module's dynamic nature.
Pytorch's module API naturally ignores layers/operations which are implemented as
torch.autograd.Function, without an nn.Module. For example:
out = F.relu(self.bn1(self.conv1(x)))
Another case where traversing the nn.Module API is not sufficient to create a
representation of the graph, is the same nn.Module is used several times in the
graph. For example:
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out) <=== First use of self.relu
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out) <=== Second use of self.relu
"""
def __init__(self, model, dummy_input):
with torch.onnx.set_training(model, False):
trace, _ = jit.trace(model, dummy_input)
# Let ONNX do the heavy lifting: fusing the convolution nodes; fusing the nodes
# composing a GEMM operation; etc.
torch.onnx._optimize_trace(trace, False)
graph = trace.graph()
self.ops = []
self.params = {}
self.edges = []
self.temp = {}
in_out = list(graph.inputs()) + list(graph.outputs())
for param in in_out:
self.__add_param(param)
for node in graph.nodes():
op = {}
op['name'] = node.scopeName()
op['orig-name'] = node.scopeName()
op['type'] = node.kind()
op['inputs'] = []
op['outputs'] = []
op['params'] = []
# in-place operators create very confusing graphs (Resnet, for example),
# so we "unroll" them
same = [layer for layer in self.ops if layer['orig-name'] == op['orig-name']]
if len(same) > 0:
op['name'] += "." + str(len(same))
self.ops.append(op)
for input_ in node.inputs():
self.__add_input(op, input_)
self.edges.append((input_.uniqueName(), op['name']))
for output in node.outputs():
self.__add_output(op, output)
self.edges.append((op['name'], output.uniqueName()))
op['attrs'] = {attr_name: node[attr_name] for attr_name in node.attributeNames()}
def __add_input(self, op, n):
param = self.__add_param(n)
if param is None: return
if param['id'] not in op['inputs']:
op['inputs'].append(param['id'])
def __add_output(self, op, n):
param = self.__add_param(n)
if param is None: return
if param['id'] not in op['outputs']:
op['outputs'].append(param['id'])
def __add_param(self, n):
param = {}
if n.uniqueName() not in self.params:
param = self.__tensor_desc(n)
self.params[n.uniqueName()] = param
else:
param = self.params[n.uniqueName()]
return param
def __tensor_desc(self, n):
tensor = {}
tensor['id'] = n.uniqueName()
try:
s = str(n.type())
tensor['type'] = s[:s.find('(')]
s = s[s.find('(')+1: s.find(')')]
tensor['shape'] = tuple(map(lambda x: int(x), s.split(',')))
except:
return None
return tensor
def attributes_summary(sgraph, ignore_attrs):
"""Generate a summary of a graph's attributes.
Args:
sgraph: a SummaryGraph instance
ignore_attrs: a list of attributes to ignore in the output dataframe
Output:
A Pandas dataframe
"""
def pretty_val(val):
if type(val) == int:
return format(val, ",d")
return str(val)
def pretty_attrs(attrs, ignore_attrs):
ret = ''
for key, val in attrs.items():
if key in ignore_attrs:
continue
ret += key + ': ' + pretty_val(val) + '\n'
return ret
df = pd.DataFrame(columns=['Name', 'Type', 'Attributes'])
pd.set_option('precision', 5)
for i, op in enumerate(sgraph.ops):
df.loc[i] = [op['name'], op['type'], pretty_attrs(op['attrs'], ignore_attrs)]
return df
def attributes_summary_tbl(sgraph, ignore_attrs):
df = attributes_summary(sgraph, ignore_attrs)
return tabulate(df, headers='keys', tablefmt='psql')
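# Illustrative usage sketch (assumes the experimental JIT/ONNX tracing API this
# module targets; the model choice and input shape are arbitrary examples):
# model = torchvision.models.resnet18()
# dummy_input = Variable(torch.randn(1, 3, 224, 224))
# sgraph = SummaryGraph(model, dummy_input)
# print(attributes_summary_tbl(sgraph, ignore_attrs=['value']))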
def connectivity_summary(sgraph):
"""Generate a summary of each node's connectivity.
Args:
sgraph: a SummaryGraph instance
"""
df = pd.DataFrame(columns=['Name', 'Type', 'Inputs', 'Outputs'])
pd.set_option('precision', 5)
for i, op in enumerate(sgraph.ops):
df.loc[i] = [op['name'], op['type'], op['inputs'], op['outputs']]
return df
def connectivity_summary_verbose(sgraph):
"""Generate a summary of each node's connectivity, with details
about the parameters.
Args:
sgraph: a SummaryGraph instance
"""
def format_list(l):
ret = ''
for i in l: ret += str(i) + '\n'
return ret[:-1]
df = pd.DataFrame(columns=['Name', 'Type', 'Inputs', 'Outputs'])
| pd.set_option('precision', 5) | pandas.set_option |
""" This file originated from the online analysis project at:
https://github.com/OlafHaag/UCM-WebApp
"""
import itertools
import pandas as pd
import pingouin as pg
import numpy as np
from scipy.stats import wilcoxon
from sklearn.decomposition import PCA
from sklearn.covariance import EllipticEnvelope
def preprocess_data(users, blocks, trials):
""" Clean data.
:param users: Data from users table
:type users: pandas.DataFrame
:param blocks: Data from circletask_blocks table.
:type blocks: pandas.DataFrame
:param trials: Data from circletask_trials table.
:type trials: pandas.DataFrame
:returns: Joined and recoded DataFrame. Number of erroneous blocks. Number of sessions removed as a consequence.
Number of removed trials.
:rtype: tuple[pandas.DataFrame, int, int, int]
"""
blocks, n_errors, invalid_sessions = remove_erroneous_blocks(blocks)
# Merge to 1 table.
df = join_data(users, blocks, trials)
# Remove invalid trials.
cleaned, n_trials_removed = get_valid_trials(df)
return cleaned, n_errors, len(invalid_sessions), n_trials_removed
def remove_erroneous_blocks(blocks, delta_time=2.0, n_blocks=3):
""" Remove sessions with erroneous data due to a NeuroPsy Research App malfunction.
The error causes block data to be duplicated and the values for df1 & df2 multiplied again by 100.
The duplicated blocks are identified by comparing their time stamps to the previous block (less than 2 seconds
difference). If the error caused the session to end early, the whole session is removed.
NeuroPsyResearchApp issue #1.
:param pandas.DataFrame blocks: Data about blocks.
:param float delta_time: Threshold in seconds for which a consecutive block in a session is considered invalid
if it was completed within this period after the previous. Default is 2.0 seconds.
:param int n_blocks: Required number of blocks per session. If a session doesn't have this many blocks,
it gets removed.
:returns: Cleaned block data. Number of errors found. List of sessions that were removed as a consequence.
:rtype: tuple[pandas.DataFrame, int, list]
"""
# Identify duplicated blocks. Consecutive time stamps are usually less than 2 seconds apart.
mask = blocks.groupby(['session_uid'])['time'].diff() < delta_time
try:
n_errors = mask.value_counts()[True]
except KeyError:
n_errors = 0
blocks = blocks.loc[~mask, :]
# Now, after removal of erroneous data a session might not have all 3 blocks we expect. Exclude whole session.
invalid_sessions = blocks['session_uid'].value_counts() != n_blocks
invalid_sessions = invalid_sessions.loc[invalid_sessions].index.to_list()
blocks = blocks.loc[~blocks['session_uid'].isin(invalid_sessions), :]
return blocks, n_errors, invalid_sessions
def join_data(users, blocks, trials):
""" Take data from different database tables and join them to a single DataFrame. Some variables are renamed and
recoded in the process, some are dropped.
:param users: Data from users table
:type users: pandas.DataFrame
:param blocks: Data from circletask_blocks table.
:type blocks: pandas.DataFrame
:param trials: Data from circletask_trials table.
:type trials: pandas.DataFrame
:return: Joined and recoded DataFrame.
:rtype: pandas.DataFrame
"""
# Use users' index instead of id for obfuscation and shorter display.
users_inv_map = pd.Series(users.index, index=users.id)
# Remove trials that don't belong to any block. Those have been excluded.
trials = trials.loc[trials['block_id'].isin(blocks.index), :]
# Start a new table for trials and augment with data from other tables.
df = pd.DataFrame(index=trials.index)
df['user'] = trials.user_id.map(users_inv_map).astype('category')
df['session'] = trials['block_id'].map(blocks['nth_session']).astype('category')
# Map whole sessions to the constraint in the treatment block as a condition for easier grouping during analysis.
df['condition'] = trials['block_id'].map(blocks[['session_uid', 'treatment']].replace(
{'treatment': {'': np.nan}}).groupby('session_uid')['treatment'].ffill().bfill()).astype('category')
df['block'] = trials['block_id'].map(blocks['nth_block']).astype('category')
# Add pre and post labels to trials for each block. Name it task instead of treatment.
# Theoretically, one could have changed number of blocks and order of treatment, but we assume default order here.
df['task'] = trials['block_id'].map(blocks['treatment'].replace('', np.nan).where(~blocks['treatment'].isna(),
blocks['nth_block'].map(
{1: 'pre',
3: 'post'
})
)
).astype('category')
#df['task'] = trials['block_id'].map(blocks['treatment'].replace(to_replace={r'\w+': 1, r'^\s*$': 0}, regex=True)
# ).astype('category')
df = pd.concat((df, trials), axis='columns')
# Add columns for easier filtering.
df['grab_diff'] = (df['df2_grab'] - df['df1_grab']).abs()
df['duration_diff'] = (df['df2_duration'] - df['df1_duration']).abs()
# Exclude columns.
df.drop(columns=['user_id'], inplace=True)
return df
def get_valid_trials(dataframe):
""" Remove trials where sliders where not grabbed concurrently or grabbed at all.
:param dataframe: Trial data.
:type dataframe: pandas.DataFrame
:returns: Filtered trials. Number of removed trials.
:rtype: tuple[pandas.DataFrame, int]
"""
# Remove trials with missing values. This means at least one slider wasn't grabbed.
df = dataframe.dropna(axis='index', how='any')
# Remove trials where sliders where not grabbed concurrently.
mask = ~((df['df1_release'] <= df['df2_grab']) | (df['df2_release'] <= df['df1_grab']))
df = df.loc[mask, :]
n_removed = len(dataframe) - len(df)
return df, n_removed
def get_outlyingness(data, contamination=0.1):
""" Outlier detection from covariance estimation in a Gaussian distributed dataset.
:param data: Data in which to detect outliers. Take care that n_samples > n_features ** 2 .
:type data: pandas.DataFrame
:param contamination: The amount of contamination of the data set, i.e. the proportion of outliers in the data set.
Range is (0, 0.5).
:type contamination: float
:returns: Decision on each row if it's an outlier. And contour array for drawing ellipse in graph.
:rtype: tuple[numpy.ndarray, numpy.ndarray]
"""
robust_cov = EllipticEnvelope(support_fraction=1., contamination=contamination)
outlyingness = robust_cov.fit_predict(data)
decision = (outlyingness-1).astype(bool)
# Visualisation.
xx, yy = np.meshgrid(np.linspace(0, 100, 101),
np.linspace(0, 100, 101))
z = robust_cov.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
return decision, z
#ToDo: remove blocks/sessions with sum mean way off.
#ToDo: remove sessions with less than 10 trials in any block.
def get_performance_data(dataframe):
"""[summary]
:param dataframe: [description]
:type dataframe: [type]
"""
return dataframe.groupby(['user', 'block', 'task'])[['df1', 'df2']].mean().dropna().sort_index(level=['user','block'])
def get_pca_data(dataframe):
""" Conduct Principal Component Analysis on 2D dataset.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:return: Explained variance, components and means.
:rtype: pandas.DataFrame
"""
# We don't reduce dimensionality, but overlay the 2 principal components in 2D.
pca = PCA(n_components=2)
x = dataframe[['df1', 'df2']].values
try:
# df1 and df2 have the same scale. No need to standardize. Standardizing might actually distort PCA here.
pca.fit(x)
except ValueError:
# Return empty.
df = pd.DataFrame(columns=['var_expl', 'var_expl_ratio', 'x', 'y', 'meanx', 'meany'])
else:
df = pd.DataFrame({'var_expl': pca.explained_variance_.T,
'var_expl_ratio': pca.explained_variance_ratio_.T * 100, # In percent
'x': pca.components_[:, 0],
'y': pca.components_[:, 1],
'meanx': pca.mean_[0],
'meany': pca.mean_[1],
},
index=[1, 2] # For designating principal components.
)
df.index.rename('PC', inplace=True)
return df
def get_pca_vectors(dataframe):
""" Get principal components as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Tabular PCA data.
:type dataframe: pandas.DataFrame
:return: Principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
# Use the "components" to define the direction of the vectors,
# and the "explained variance" to define the squared-length of the vectors.
directions = dataframe[['x', 'y']] * np.sqrt(dataframe[['var_expl']].values) * 3
# Move the directions by the mean, so we get vectors pointing to the start and vectors pointing to the destination.
vector2 = directions + dataframe[['meanx', 'meany']].values
vectors = list(zip(dataframe[['meanx', 'meany']].values, vector2.values))
return vectors
def get_pca_vectors_by(dataframe, by=None):
""" Get principal components for each group as vectors. Vectors can then be used to annotate graphs.
:param dataframe: Data holding 'df1' and 'df2' values as columns.
:type dataframe: pandas.DataFrame
:param by: Column to group data by and return 2 vectors for each group.
:type by: str|list
:return: list of principal components as vector pairs in input space with mean as origin first and offset second.
:rtype: list
"""
vector_pairs = list()
if by is None:
pca_df = get_pca_data(dataframe)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
else:
grouped = dataframe.groupby(by, observed=True) # With categorical groupers we want only non-empty groups.
for group, data in grouped:
pca_df = get_pca_data(data)
v = get_pca_vectors(pca_df)
vector_pairs.append(v)
# ToDo: Augment by groupby criteria.
return vector_pairs
def get_interior_angle(vec0, vec1):
""" Get the smaller angle between vec0 and vec1 in degrees.
:param vec0: Vector 0
:type vec0: numpy.ndarray
:param vec1: Vector 1
:type vec1: numpy.ndarray
:return: Interior angle between vector0 and vector1 in degrees.
:rtype: float
"""
angle = np.math.atan2(np.linalg.det([vec0, vec1]), np.dot(vec0, vec1))
degrees = abs(np.degrees(angle))
# Min and max should be between 0° an 90°.
degrees = min(degrees, 180.0 - degrees)
return degrees
def get_ucm_vec(p0=None, p1=None):
""" Returns 2D unit vector in direction of uncontrolled manifold. """
if p0 is None:
p0 = np.array([25, 100])
if p1 is None:
p1 = np.array([100, 25])
parallel = p1 - p0
parallel = parallel / np.linalg.norm(parallel) # Normalize.
return parallel
def get_orthogonal_vec2d(vec):
""" Get a vector that is orthogonal to vec and has same length.
:param vec: 2D Vector
:return: 2D Vector orthogonal to vec.
:rtype: numpy.ndarray
"""
ortho = np.array([-vec[1], vec[0]])
return ortho
def get_pc_ucm_angles(dataframe, vec_ucm):
""" Computes the interior angles between pca vectors and ucm parallel/orthogonal vectors.
:param dataframe: PCA data.
:type dataframe: pandas.DataFrame
:param vec_ucm: Vector parallel to UCM.
:type vec_ucm: numpy.ndarray
:return: Each angle between principal components and UCM parallel and orthogonal vector.
:rtype: pandas.DataFrame
"""
df_angles = dataframe[['x', 'y']].transform(lambda x: (a:=get_interior_angle(vec_ucm, x), 90.0 - a),
axis='columns').rename(columns={'x': 'parallel', 'y': 'orthogonal'})
df_angles = pd.concat((dataframe[['task', 'PC']], df_angles), axis='columns')
return df_angles
def get_projections(points, vec_ucm):
""" Returns coefficients a and b in x = a*vec_ucm + b*vec_ortho with x being the difference of a data point and
the mean.
Projection is computed using a transformation matrix with ucm parallel and orthogonal vectors as basis.
:param points: Data of 2D points.
:type points: pandas.Dataframe
:param vec_ucm: Unit vector parallel to uncontrolled manifold.
:type vec_ucm: numpy.ndarray
:return: Array with projected lengths onto vector parallel to UCM as 'a', onto vector orthogonal to UCM as 'b'.
:rtype: pandas.Dataframe
"""
# Get the vector orthogonal to the UCM.
vec_ortho = get_orthogonal_vec2d(vec_ucm)
# Build a transformation matrix with vec_ucm and vec_ortho as new basis vectors.
A = np.vstack((vec_ucm, vec_ortho)).T # A is not an orthogonal projection matrix (A=A.T), but this works.
# Centralize the data. Analogous to calculating across trials deviation from average for each time step.
diffs = points - points.mean()
# For computational efficiency we shortcut the projection calculation with matrix multiplication.
# The actual math behind it:
# coeffs = vec_ucm.T@diff/np.sqrt(vec_ucm.T@vec_ucm), vec_ortho.T@diff/np.sqrt(vec_ortho.T@vec_ortho)
# Biased variance (normalized by (n-1)) of projection onto UCM vector:
# var_ucm = [email protected](diffs, bias=True, rowvar=False)@vec_ucm/(vec_ucm.T@vec_ucm) # Rayleigh fraction.
coeffs = diffs@A
coeffs.columns = ['parallel', 'orthogonal']
return coeffs
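# Illustrative worked example (numbers are made up):
# ucm = get_ucm_vec()                    # unit vector along the line df1 + df2 = 125
# points = pd.DataFrame({'df1': [40, 60, 55], 'df2': [60, 40, 50]})
# coeffs = get_projections(points, ucm)  # columns 'parallel' and 'orthogonal'
# variances = coeffs.var(ddof=0)         # feeds get_synergy_indices below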
def get_synergy_indices(variances, n=2, d=1):
"""
n: Number of degrees of freedom. In our case 2.
d: Dimensionality of performance variable. In our case a scalar (1).
Vucm = 1/N * 1/(n - d) * sum(ProjUCM**2)
Vort = 1/N * 1/(d) * sum(ProjORT**2)
Vtotal = 1/n * (d * Vort + (n-d) * Vucm) # Anull the weights on Vucm and Vort for the sum.
dV = (Vucm - Vort) / Vtotal
dV = n*(Vucm - Vort) / ((n - d)*Vucm + d*Vort)
Zhang (2008) without weighting Vucm, Vort and Vtotal first:
dV = n * (Vucm/(n - d) - Vort/d) / (Vucm + Vort)
dVz = 0.5*ln((n / d + dV) / (n / ((n - d) - dV))
dVz = 0.5*ln((2 + dV) / (2 - dV))
Reference: https://www.frontiersin.org/articles/10.3389/fnagi.2019.00032/full#supplementary-material
:param variances: Unweighted variances of parallel and orthogonal projections to the UCM.
:type variances: pandas.DataFrame
:param n: Number of degrees of freedom. Defaults to 2.
:type: int
:param d: Dimensionality of performance variable. Defaults to 1.
:type d: int
:returns: Synergy index, Fisher's z-transformed synergy index.
:rtype: pandas.DataFrame
"""
try:
dV = n * (variances['parallel']/(n-d) - variances['orthogonal']/d) \
/ variances[['parallel', 'orthogonal']].sum(axis='columns')
except KeyError:
synergy_indices = pd.DataFrame(columns=["dV", "dVz"])
else:
dVz = 0.5 * np.log((n/d + dV)/(n/(n-d) - dV))
synergy_indices = pd.DataFrame({"dV": dV, "dVz": dVz})
return synergy_indices
def get_synergy_idx_bounds(n=2, d=1):
""" Get lower and upper bounds of the synergy index.
dV = n * (Vucm/(n - d) - Vort/d) / (Vucm + Vort)
If all variance lies within the UCM, then Vort=0 and it follows for the upper bound: dV = n/(n-d)
If all variance lies within Vort, then Vucm=0 and it follows for the lower bound: dV = -n/d
:param n: Number of degrees of freedom.
:type: int
:param d: Dimensionality of performance variable.
:type d: int
:returns: Lower and upper bounds of synergy index.
:rtype: tuple
"""
dV_lower = -n/d
dV_upper = n/(n-d)
return dV_lower, dV_upper
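# Sanity check from the formulas above: with the defaults n=2, d=1 the synergy
# index is bounded by dV_lower = -2 and dV_upper = 2.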
def get_mean(dataframe, column, by=None):
""" Return mean values of column x (optionally grouped)
:param dataframe: Data
:type dataframe: pandas.Dataframe
:param column: Column name
:type column: str
:param by: Column names by which to group.
:type by: str|list
:return: mean value, optionally for each group.
:rtype: numpy.float64|pandas.Series
"""
if by is None:
means = dataframe[column].mean()
else:
means = dataframe.groupby(by, observed=True)[column].mean()
return means
def get_descriptive_stats(data, by=None):
""" Return mean and variance statistics for data.
:param data: numerical data.
:type data: pandas.Dataframe
:param by: groupby column name(s)
:type by: str|List
:return: Dataframe with columns mean, var, count and column names of data as rows.
:rtype: pandas.Dataframe
"""
# There's a bug in pandas 1.0.4 where you can't use custom numpy functions in agg anymore (ValueError).
# Note that the variance of projections is usually divided by (n-d) for Vucm and d for Vort. Both are 1 in our case.
# Pandas default var returns unbiased population variance /(n-1). Doesn't make a difference for synergy indices.
f_var = lambda series: series.var(ddof=0)
f_var.__name__ = 'variance' # Column name gets function name.
# When there're no data, return empty DataFrame with columns.
if data.empty:
if by:
data.set_index(by, drop=True, inplace=True)
col_idx = pd.MultiIndex.from_product([data.columns, ['mean', f_var.__name__]])
stats = pd.DataFrame(None, index=data.index, columns=col_idx)
stats['count'] = None
return stats
if not by:
stats = data.agg(['mean', f_var, 'count']).T
stats['count'] = stats['count'].astype(int)
else:
grouped = data.groupby(by, observed=True)
stats = grouped.agg(['mean', f_var])
stats['count'] = grouped.size()
stats.dropna(inplace=True)
return stats
def get_statistics(dataframe):
""" Calculate descriptive statistics including synergy indices for key values of the anaylsis.
:param dataframe: Data from joined table on trials with projections.
:type dataframe: pandas.DataFrame
:return: Descriptive statistics and synergy indices.
:rtype: pandas.DataFrame
"""
groupers = ['user', 'session', 'condition', 'block_id', 'block', 'task']
try:
dataframe[groupers] = dataframe[groupers].astype('category')
except (KeyError, ValueError):
df_stats = get_descriptive_stats(pd.DataFrame(columns=dataframe.columns))
cov = pd.DataFrame(columns=[('df1,df2 covariance', '')])
else:
df_stats = get_descriptive_stats(dataframe[groupers + ['df1', 'df2', 'sum', 'parallel', 'orthogonal']],
by=groupers).drop(columns=[('parallel', 'mean'), # Always equal 0.
('orthogonal', 'mean')])
# Get statistic characteristics of absolute lengths of projections.
length = dataframe.groupby(groupers, observed=True)[['parallel', 'orthogonal']].agg(lambda x: x.abs().mean())
length.columns = pd.MultiIndex.from_product([length.columns, ['absolute average']])
# Get covariance between degrees of freedom.
cov = dataframe.groupby(groupers, observed=True)[['df1', 'df2']].apply(lambda x: np.cov(x.T, ddof=0)[0, 1])
try:
cov = cov.to_frame(('df1,df2 covariance', '')) # MultiIndex.
except AttributeError: # In case cov is an empty Dataframe.
cov = pd.DataFrame(columns=pd.MultiIndex.from_tuples([('df1,df2 covariance', '')]))
# Get synergy indices based on projection variances we just calculated.
df_synergies = get_synergy_indices(df_stats[['parallel', 'orthogonal']].xs('variance', level=1, axis='columns'))
# Before we merge dataframes, give this one a Multiindex, too.
df_synergies.columns = | pd.MultiIndex.from_product([df_synergies.columns, ['']]) | pandas.MultiIndex.from_product |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: | to_datetime(result[c]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 10 14:16:55 2022
@author: <NAME>
"""
import time
import pandas as pd
import colorcet as cc
import numpy as np
import datetime as dt
from bokeh.io import show, curdoc
from bokeh.plotting import figure
from bokeh.themes import built_in_themes
from bokeh.layouts import layout, column, gridplot, row
from bokeh.models import CustomJS, ColumnDataSource, CDSView, DateRangeSlider, Select, BoxSelectTool, HoverTool, \
CrosshairTool, VArea, Patch, Patches, BoxZoomTool, Div, HArea, IndexFilter, ColorBar, Span, Label, RadioButtonGroup
from bokeh.transform import linear_cmap, LinearColorMapper
from bokeh.palettes import Spectral6, GnBu, mpl, brewer, all_palettes, Viridis256, Cividis256, Turbo256, Viridis, Cividis, cividis, viridis, inferno, linear_palette
# -----clean and format the data
raw_data = pd.read_csv("US_Energy.csv", delimiter=",", na_values=('--'))
cleaned_data = raw_data.drop([13, 15, 17, 19], axis=0)
cleaned_data = cleaned_data.drop(['remove', 'units', 'source key', 'category'], axis=1)
cleaned_data = cleaned_data.transpose()
new_header = cleaned_data.iloc[0] # grab the first row for the header
cleaned_data = cleaned_data[1:] # take the data less the header row
cleaned_data.columns = new_header
for (columnName, columnData) in cleaned_data.iteritems():
cleaned_data["Change in " + columnName] = | pd.to_numeric(cleaned_data[columnName]) | pandas.to_numeric |
from json import load
from matplotlib.pyplot import title
from database.database import DbClient
from discord import Embed
import pandas as pd
from util.data import load_data
class Analytics:
def __init__(self, server_id: str, db):
self.server_id = server_id
self.db = db
@staticmethod
def no_data_embed(topic: str) -> Embed:
"""CREATE AN EMBED IF NO DATA WAS COLLECTED"""
embed = Embed(title="SORRY", description=f"Sorry, but there were no `{topic}` data collected on this server!")
return embed
async def analyze_message(self):
"""ANALYZE THE MESSAGE DATA"""
data = await load_data(self.db, self.server_id)
data = data["message"]
if len(data) == 0:
return self.no_data_embed("message")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
channelid_counts = pd.value_counts(df["channelid"])
role_counts = pd.value_counts(df["roles"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message data"),
Embed(title=embed_title, description="Message counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message send from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
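# The frame above assumes each stored message record carries at least
# 'channelid', 'roles' and 'timestamp' fields, e.g. (hypothetical values):
# {"channelid": "1234", "roles": "member", "timestamp": "2022-01-01T12:00:00"}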
async def analyze_message_delete(self):
"""ANALYZE MESSAGE DELETE"""
data = await load_data(self.db, self.server_id)
data = data["message_delete"]
if len(data) == 0:
return self.no_data_embed("message delete")
# ANALYZE THE DATA
df = pd.DataFrame(data)
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message delete ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message edit data"),
Embed(title=embed_title, description="Message delete counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message delete from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message delete counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message delete counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_message_edit(self):
"""ANALYZE MESSAGE EDIT"""
data = await load_data(self.db, self.server_id)
data = data["message_edit"]
if len(data) == 0:
return self.no_data_embed("message edit")
# ANALYZE THE DATA
df = pd.DataFrame(data)
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = pd.value_counts(df["hours"])
weekday_count = pd.value_counts(df["weekday"])
embed_title = "Message edit ~ Analytics"
embeds = [
Embed(title=embed_title, description="Here you can see the analyzed message edit data"),
Embed(title=embed_title, description="Message edits counted in channels:\n"f"```{channelid_counts}```"),
Embed(title=embed_title, description="Message edits from roles:\n"f"```{role_counts}```"),
Embed(title=embed_title, description="Message edits counted in which hours:\n"f"```{hours_count}```"),
Embed(title=embed_title, description="Message edits counted on which weekday:\n"f"```{weekday_count}```")
]
return embeds
async def analyze_reaction(self):
"""ANALYZE THE REACTION DATA"""
data = await load_data(self.db, self.server_id)
data = data["reaction"]
if len(data) == 0:
return self.no_data_embed("reaction")
# ANALYZE THE DATA:
df = pd.DataFrame(data)
name_count = pd.value_counts(df["reactionname"])
role_counts = pd.value_counts(df["roles"])
channelid_counts = pd.value_counts(df["channelid"])
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["timestamp"].dt.hour
df["weekday"] = df["timestamp"].dt.day_name()
hours_count = | pd.value_counts(df["hours"]) | pandas.value_counts |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import re
from copy import copy as copy_obj
from numbers import Integral
from typing import Type, Sequence
import numpy as np
import pandas as pd
from pandas._libs import lib
from pandas.api.indexers import check_array_indexer
from pandas.api.types import (
pandas_dtype,
is_scalar,
is_array_like,
is_string_dtype,
is_list_like,
)
from pandas.api.extensions import (
ExtensionArray,
ExtensionDtype,
register_extension_dtype,
)
from pandas.arrays import StringArray as StringArrayBase
from pandas.core import ops
from pandas.core.algorithms import take
from pandas.compat import set_function_name
try:
from pandas._libs.arrays import NDArrayBacked
except ImportError:
NDArrayBacked = None
try:
import pyarrow as pa
pa_null = pa.NULL
except ImportError: # pragma: no cover
pa = None
pa_null = None
from ..config import options
from ..core import is_kernel_mode
from ..lib.version import parse as parse_version
from ..utils import tokenize
_use_bool_any_all = parse_version(pd.__version__) >= parse_version("1.3.0")
class ArrowDtype(ExtensionDtype):
@property
def arrow_type(self): # pragma: no cover
raise NotImplementedError
def __from_arrow__(self, array):
return self.construct_array_type()(array)
@register_extension_dtype
class ArrowStringDtype(ArrowDtype):
"""
Extension dtype for arrow string data.
.. warning::
ArrowStringDtype is considered experimental. The implementation and
parts of the API may change without warning.
In particular, ArrowStringDtype.na_value may change to no longer be
``numpy.nan``.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> import mars.dataframe as md
>>> md.ArrowStringDtype()
ArrowStringDtype
"""
type = str
kind = "U"
name = "Arrow[string]"
na_value = pa_null
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
@classmethod
def construct_array_type(cls) -> "Type[ArrowStringArray]":
return ArrowStringArray
@property
def arrow_type(self):
return pa.string()
@register_extension_dtype
class ArrowStringDtypeAlias(ArrowStringDtype):
name = "arrow_string" # register an alias name for compatibility
class ArrowListDtypeType(type):
"""
the type of ArrowListDtype; this metaclass determines subclass ability
"""
pass
class ArrowListDtype(ArrowDtype):
_metadata = ("_value_type",)
def __init__(self, dtype):
if isinstance(dtype, type(self)):
dtype = dtype.value_type
if pa and isinstance(dtype, pa.DataType):
dtype = dtype.to_pandas_dtype()
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype) and not isinstance(dtype, ArrowStringDtype):
# convert string dtype to arrow string dtype
dtype = ArrowStringDtype()
self._value_type = dtype
@property
def value_type(self):
return self._value_type
@property
def kind(self):
return "O"
@property
def type(self):
return ArrowListDtypeType
@property
def name(self):
return f"Arrow[List[{self.value_type.name}]]"
@property
def arrow_type(self):
if isinstance(self._value_type, ArrowDtype):
arrow_subdtype = self._value_type.arrow_type
else:
arrow_subdtype = pa.from_numpy_dtype(self._value_type)
return pa.list_(arrow_subdtype)
def __repr__(self) -> str:
return self.name
@classmethod
def construct_array_type(cls) -> "Type[ArrowListArray]":
return ArrowListArray
@classmethod
def construct_from_string(cls, string):
msg = f"Cannot construct a 'ArrowListDtype' from '{string}'"
xpr = re.compile(r"Arrow\[List\[(?P<value_type>[^,]*)\]\]$")
m = xpr.match(string)
if m:
value_type = m.groupdict()["value_type"]
return ArrowListDtype(value_type)
else:
raise TypeError(msg)
@classmethod
def is_dtype(cls, dtype) -> bool:
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, str):
try:
cls.construct_from_string(dtype)
except TypeError:
return False
else:
return True
else:
return isinstance(dtype, cls)
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
if not isinstance(other, ArrowListDtype):
return False
value_type = self._value_type
other_value_type = other._value_type
try:
return value_type == other_value_type
except TypeError:
# cannot compare numpy dtype and extension dtype
return other_value_type == value_type
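# Illustrative dtype-resolution examples (results follow the definitions above):
# ArrowStringDtype.construct_from_string("Arrow[string]")  # -> ArrowStringDtype()
# ArrowListDtype.construct_from_string("Arrow[List[uint8]]").value_type  # -> dtype('uint8')
# ArrowListDtype(str).name  # -> "Arrow[List[Arrow[string]]]"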
class ArrowArray(ExtensionArray):
_arrow_type = None
def __init__(self, values, dtype: ArrowDtype = None, copy=False):
pandas_only = self._pandas_only()
if pa is not None and not pandas_only:
self._init_by_arrow(values, dtype=dtype, copy=copy)
elif not is_kernel_mode():
# not in kernel mode, allow to use numpy handle data
# just for infer dtypes purpose
self._init_by_numpy(values, dtype=dtype, copy=copy)
else:
raise ImportError(
"Cannot create ArrowArray " "when `pyarrow` not installed"
)
# for test purpose
self._force_use_pandas = pandas_only
def _init_by_arrow(self, values, dtype: ArrowDtype = None, copy=False):
if isinstance(values, (pd.Index, pd.Series)):
# for pandas Index and Series,
# convert to PandasArray
values = values.array
if isinstance(values, type(self)):
arrow_array = values._arrow_array
elif isinstance(values, ExtensionArray):
# if come from pandas object like index,
# convert to pandas StringArray first,
# validation will be done in construct
arrow_array = pa.chunked_array([pa.array(values, from_pandas=True)])
elif isinstance(values, pa.ChunkedArray):
arrow_array = values
elif isinstance(values, pa.Array):
arrow_array = pa.chunked_array([values])
else:
arrow_array = pa.chunked_array([pa.array(values, type=dtype.arrow_type)])
if copy:
arrow_array = copy_obj(arrow_array)
self._use_arrow = True
self._arrow_array = arrow_array
if NDArrayBacked is not None and isinstance(self, NDArrayBacked):
NDArrayBacked.__init__(self, np.array([]), dtype)
else:
self._dtype = dtype
def _init_by_numpy(self, values, dtype: ArrowDtype = None, copy=False):
self._use_arrow = False
ndarray = np.array(values, copy=copy)
if NDArrayBacked is not None and isinstance(self, NDArrayBacked):
NDArrayBacked.__init__(self, ndarray, dtype)
else:
self._dtype = dtype
self._ndarray = np.array(values, copy=copy)
@classmethod
def _pandas_only(cls):
return options.dataframe.arrow_array.pandas_only
def __repr__(self):
return f"{type(self).__name__}({repr(self._array)})"
@property
def _array(self):
return self._arrow_array if self._use_arrow else self._ndarray
@property
def dtype(self) -> "Type[ArrowDtype]":
return self._dtype
@property
def nbytes(self) -> int:
if self._use_arrow:
return sum(
x.size
for chunk in self._arrow_array.chunks
for x in chunk.buffers()
if x is not None
)
else:
return self._ndarray.nbytes
@property
def shape(self):
if self._use_arrow:
return (self._arrow_array.length(),)
else:
return self._ndarray.shape
def memory_usage(self, deep=True) -> int:
if self._use_arrow:
return self.nbytes
else:
return | pd.Series(self._ndarray) | pandas.Series |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
# %%
import lightgbm as lgb
import pandas as pd
import numpy as np
import time
from sklearn.metrics import accuracy_score
# %%
print('Loading all data...')
start = time.time()
all_train_data = pd.read_csv(
'data/train_preliminary/clicklog_ad_user_train_eval_test.csv')
df_test = pd.read_csv('data/test/clicklog_ad.csv')
print('Split data into train and validation...')
TRAIN_DATA_PERCENT = 0.9
msk = np.random.rand(len(all_train_data)) < TRAIN_DATA_PERCENT
df_train = all_train_data[msk]
df_val = all_train_data[~msk]
feature_columns = df_train.columns.values.tolist()
feature_columns.remove('age')
feature_columns.remove('gender')
label_age, label_gender = ['age'], ['gender']
X_train = df_train[feature_columns]
y_train_gender = df_train[label_gender]
# set label 0 and 1
y_train_gender.gender = y_train_gender.gender-1
y_train_age = df_train[label_age]
y_train_age.age = y_train_age.age-1
X_val = df_val[feature_columns]
y_val_gender = df_val[label_gender]
y_val_gender.gender = y_val_gender.gender-1
y_val_age = df_val[label_age]
y_val_age.age = y_val_age.age-1
X_test = df_test[feature_columns]
print('Loading data uses {:.1f}s'.format(time.time()-start))
categorical_feature = ['industry', 'advertiser_id',
'product_category', 'product_id', 'ad_id', 'creative_id', 'user_id']
# build the LightGBM datasets for gender
lgb_train_gender = lgb.Dataset(
X_train, y_train_gender, feature_name=feature_columns, categorical_feature=categorical_feature)
lgb_eval_gender = lgb.Dataset(
X_val, y_val_gender, reference=lgb_train_gender, feature_name=feature_columns, categorical_feature=categorical_feature)
# build the LightGBM datasets for age
lgb_train_age = lgb.Dataset(
X_train, y_train_age, feature_name=feature_columns, categorical_feature=categorical_feature)
lgb_eval_age = lgb.Dataset(
X_val, y_val_age, reference=lgb_train_age, feature_name=feature_columns, categorical_feature=categorical_feature)
# %%
# write to hdf5 to read fast
X_train.to_hdf('data/clicklog_ad_user_train_eval_test.h5',
key='X_train', mode='w')
y_train_gender.to_hdf('data/clicklog_ad_user_train_eval_test.h5',
key='y_train_gender', mode='a')
y_train_age.to_hdf('data/clicklog_ad_user_train_eval_test.h5',
key='y_train_age', mode='a')
X_val.to_hdf('data/clicklog_ad_user_train_eval_test.h5', key='X_val', mode='a')
y_val_gender.to_hdf('data/clicklog_ad_user_train_eval_test.h5',
key='y_val_gender', mode='a')
y_val_age.to_hdf('data/clicklog_ad_user_train_eval_test.h5',
key='y_val_age', mode='a')
X_test.to_hdf('data/clicklog_ad_user_train_eval_test.h5',
key='X_test', mode='a')
# %%
# read from hdf5
X_train = pd.read_hdf(
'data/clicklog_ad_user_train_eval_test.h5', key='X_train', mode='r')
y_train_gender = pd.read_hdf('data/clicklog_ad_user_train_eval_test.h5',
key='y_train_gender', mode='r')
y_train_age = pd.read_hdf('data/clicklog_ad_user_train_eval_test.h5',
key='y_train_age', mode='r')
X_val = pd.read_hdf(
'data/clicklog_ad_user_train_eval_test.h5', key='X_val', mode='r')
y_val_gender = pd.read_hdf('data/clicklog_ad_user_train_eval_test.h5',
key='y_val_gender', mode='r')
y_val_age = pd.read_hdf(
'data/clicklog_ad_user_train_eval_test.h5', key='y_val_age', mode='r')
X_test = pd.read_hdf(
'data/clicklog_ad_user_train_eval_test.h5', key='X_test', mode='r')
# %%
def LGBM_gender():
params_gender = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
        'metric': {'binary_logloss', 'binary_error'},  # evaluation metrics
        'max_depth': -1,  # no limit on tree depth
        # for higher accuracy
'max_bin': 2**10-1,
'num_leaves': 2**10,
'min_data_in_leaf': 1,
'learning_rate': 0.01,
# 'feature_fraction': 0.9,
# 'bagging_fraction': 0.8,
# 'bagging_freq': 5,
# 'is_provide_training_metric': True,
'verbose': 1
}
print('Start training...')
# train
gbm = lgb.train(params_gender,
lgb_train_gender,
num_boost_round=10,
valid_sets=lgb_eval_gender,
# early_stopping_rounds=5,
)
print('training done!')
print('Saving model...')
# save model to file
gbm.save_model('tmp/model_gender.txt')
print('save model done!')
return gbm
# %%
def LGBM_age():
params_age = {
'boosting_type': 'gbdt',
'objective': 'multiclass',
"num_class": 10,
        # the three most important parameters for fine-tuning
        'num_leaves': 2**10-1,
        'max_depth': -1,  # no limit on tree depth
        'min_data_in_leaf': 1,
        # for higher accuracy
# 'max_bin': 2**9-1,
'metric': {'multi_logloss', 'multi_error'},
'learning_rate': 0.1,
# 'feature_fraction': 0.9,
# 'bagging_fraction': 0.8,
# 'bagging_freq': 5,
'verbose': 1
}
print('Start training...')
# train
gbm = lgb.train(params_age,
lgb_train_age,
num_boost_round=50,
valid_sets=lgb_eval_age,
# early_stopping_rounds=5,
)
print('Saving model...')
# save model to file
gbm.save_model('tmp/model_age.txt')
print('save model done!')
return gbm
# %%
gbm_gender = LGBM_gender()
gbm_age = LGBM_age()
# gbm_gender = lgb.Booster(model_file='tmp/model_gender.txt')
# gbm_age = lgb.Booster(model_file='tmp/model_age.txt')
# %%
def evaluate():
print('Start predicting...')
y_pred_gender_probability = gbm_gender.predict(
X_val, num_iteration=gbm_gender.best_iteration)
threshold = 0.5
y_pred_gender = np.where(y_pred_gender_probability > threshold, 1, 0)
# eval
print('threshold: {:.1f} The accuracy of prediction is:{:.2f}'.format(threshold,
accuracy_score(y_val_gender, y_pred_gender)))
# %%
print('Start evaluate data predicting...')
y_pred_age_probability = gbm_age.predict(
X_val, num_iteration=gbm_age.best_iteration)
y_pred_age = np.argmax(y_pred_age_probability, axis=1)
# eval
print('The accuracy of prediction is:{:.2f}'.format(
accuracy_score(y_val_age, y_pred_age)))
d = {'user_id': X_val.user_id.values.tolist(), 'gender': y_pred_gender.tolist(),
'age': y_pred_age.tolist()}
ans_df = pd.DataFrame(data=d)
    # decide gender and age for each user by majority vote (see the sketch after this function)
ans_df_grouped = ans_df.groupby(['user_id']).agg(
lambda x: x.value_counts().index[0])
ans_df_grouped.gender = ans_df_grouped.gender+1
ans_df_grouped.age = ans_df_grouped.age+1
ans_df_grouped.to_csv('data/ans.csv', header=True)
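# Illustrative sketch (editor addition): the groupby/agg above implements
# per-user majority voting, because value_counts() sorts by frequency and
# .index[0] therefore picks the most common prediction. Toy example with
# hypothetical values:
#   votes = pd.DataFrame({'user_id': [1, 1, 1, 2, 2],
#                         'gender': [0, 0, 1, 1, 1],
#                         'age': [3, 3, 5, 2, 2]})
#   votes.groupby('user_id').agg(lambda x: x.value_counts().index[0])
#   # -> user 1: gender 0, age 3; user 2: gender 1, age 2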
# %%
evaluate()
# %%
def test():
print('Start predicting test gender data ...')
y_pred_gender_probability = gbm_gender.predict(
X_test, num_iteration=gbm_gender.best_iteration)
threshold = 0.5
y_pred_gender = np.where(y_pred_gender_probability > threshold, 1, 0)
print('Start predicting test age data ...')
y_pred_age_probability = gbm_age.predict(
X_test, num_iteration=gbm_age.best_iteration)
y_pred_age = np.argmax(y_pred_age_probability, axis=1)
print('start voting...')
d = {'user_id': X_test.user_id.values.tolist(),
'predicted_age': y_pred_age.tolist(),
'predicted_gender': y_pred_gender.tolist(),
}
ans_df = | pd.DataFrame(data=d) | pandas.DataFrame |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function, unicode_literals)
import argparse
import requests
import time
import datetime
import pandas as pd
from bs4 import BeautifulSoup
from dateutil.parser import parse
from collections import defaultdict
try:
from urllib import urlencode, quote, unquote
except ImportError:
from urllib.parse import urlencode, quote, unquote
list_of_commands = ('tv', 'season=<>')
help_text = 'commands=%s,[number]' % ','.join(list_of_commands)
# additional_channels = ('BBCA', 'WCBS', 'WNBC', 'WNYW', 'WABC', 'FREEFRM')
additional_channels = ('BBCA', 'WNYW', 'FREEFRM', 'FXX')
veto_channels = ('Fox', 'MyNetwork', 'ABCF', 'HALMRK', 'WGNAMER')
proxy_endpoint = '/browse.php?u='
proxy_uri = 'http://openwebproxy.pw' + proxy_endpoint
def t_request(endpoint):
timeout = 1
while True:
try:
resp = requests.get(endpoint, timeout=60)
resp.raise_for_status()
return resp
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as exc:
print('timeout %s, %s' % (timeout, exc))
            if '404 Client Error: Not Found for url:' in str(exc):
raise
time.sleep(timeout)
timeout *= 2
if timeout >= 64:
raise
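# Editor note (illustrative): the retry loop above backs off exponentially,
# sleeping 1, 2, 4, ... up to 32 seconds between attempts and re-raising once
# the next delay would reach 64 seconds; 404 responses are re-raised immediately.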
def get_available_dates_channels(zip_code=None, tv_prov=None):
# zip_code=10026, tv_prov='NY31534'
endpoint = 'http://www.imdb.com/tvgrid'
if zip_code is not None and tv_prov is not None:
endpoint += '?' + urlencode({'zip': zip_code, 'tv_prov': tv_prov})
resp = t_request(endpoint)
if resp.status_code != 200:
raise Exception('bad status %s' % resp.status_code)
soup = BeautifulSoup(resp.text, 'html.parser')
available_dates = []
available_channels = []
for s in soup.find_all('select'):
if hasattr(s, 'attrs') and 'name' in s.attrs and s.attrs['name'] == 'start_date':
available_dates = [o.attrs['value'] for o in s.find_all('option')]
available_dates = map(lambda x: parse(x).date(), available_dates)
if hasattr(s, 'attrs') and 'name' in s.attrs and s.attrs['name'] == 'channel':
available_channels = [
o.attrs['value'].strip('#') for o in s.find_all('option') if '#' in o.attrs['value']
]
return available_dates, available_channels
def get_time_program_list(date=datetime.date.today(), channel='AMC'):
endpoint = 'http://www.imdb.com/tvgrid/%s/%s' % (str(date), channel)
resp = t_request(endpoint)
soup = BeautifulSoup(resp.text, 'html.parser')
for table in soup.find_all('table'):
if 'class' not in table.attrs:
for tr_idx, tr in enumerate(table.find_all('tr')):
(start_time, start_time_url, title, desc, imdb_title, imdb_url, ep_title,
ep_url) = 8 * ['']
for td_idx, td in enumerate(tr.find_all('td')):
if td_idx == 0:
start_time = td.text
try:
start_time_url = [
int(a.attrs['href'].split('/')[3]) for a in td.find_all('a')
][0]
except ValueError:
start_time_url = sum(
int(x) * 100**i
for i, x in enumerate(td.text.split()[0].split(':')[::-1]))
else:
title = list(td.find_all('b'))[0].text
for a in td.find_all('a'):
if not imdb_title:
imdb_title = a.text
imdb_url = a.attrs['href'].split('/')[2]
elif not ep_title:
ep_title = a.text
ep_url = a.attrs['href'].split('/')[2]
desc = td.text.replace(title, '').strip()
if ep_title:
desc = desc.replace(ep_title, '').strip()
yield {
'start_int': start_time_url,
'start_str': start_time,
'title': title,
'desc': desc,
'imdb_title': imdb_title,
'imdb_url': imdb_url,
'ep_title': ep_title,
'ep_url': ep_url
}
def get_time_from_grid(date=datetime.date.today(), start_time='0000', channels=None):
last_date = (
datetime.datetime.combine(date, datetime.time()) + datetime.timedelta(days=-1)).date()
resp = t_request('http://www.imdb.com/tvgrid/%s/%s' % (str(date), start_time))
soup = BeautifulSoup(resp.text, 'html.parser')
shows = {}
for div in soup.find_all('div'):
if 'tv_channel' in div.attrs.get('class', {}):
channel_name = [
a.attrs['name'] for x in div.find_all('div')
if 'tv_callsign' in x.attrs.get('class', {}) for a in x.find_all('a')
][0]
if channels is not None and channel_name not in channels:
continue
for li in div.find_all('li'):
imdb_title, imdb_url = 2 * ['']
id_ = li.attrs['id'].replace('_show', '_info')
start_time_ = li.attrs['id'].replace(channel_name, '').replace('_show', '')
ampm = start_time_[-2:]
hr = int(start_time_[:-2]) // 100
mn = int(start_time_[:-2]) % 100
if start_time == '0000' and ampm == 'PM':
start_time_ = parse('%s %02d:%02d %s EST' % (last_date, hr, mn, ampm))
else:
start_time_ = parse('%s %02d:%02d %s EST' % (date, hr, mn, ampm))
for d in li.find_all('div'):
if 'tv_title' not in d.attrs.get('class', {}):
continue
imdb_title = d.text.strip()
for a in d.find_all('a'):
imdb_title = a.attrs['title'].strip()
imdb_url = a.attrs['href']
shows[id_] = {
'channel': channel_name,
'start_time': start_time_,
'title': imdb_title,
'imdb_title': imdb_title,
'imdb_url': imdb_url,
'ep_title': '',
'ep_url': ''
}
elif 'tv_phantom' in div.attrs.get('class', {}):
id_ = div.table.attrs.get('id', '')
if id_ in shows:
for a in div.find_all('a'):
url = a.attrs['href']
if shows[id_]['imdb_url'] != url:
shows[id_]['ep_url'] = url
shows[id_]['ep_title'] = a.text.strip()
return shows.values()
def parse_imdb_tv_listings(additional_channels=additional_channels):
available_dates, available_channels = get_available_dates_channels()
available_channels = set(available_channels)
available_channels |= set(additional_channels)
available_channels -= set(veto_channels)
dataframes = []
for channel in available_channels:
tmp_dfs = []
for date in available_dates:
if date < datetime.date.today():
continue
print('channel %s date %s' % (channel, date))
last_date = (datetime.datetime.combine(date, datetime.time()) +
datetime.timedelta(days=-1)).date()
time_prog_list = list(get_time_program_list(date, channel=channel))
df = pd.DataFrame(time_prog_list)
df['start_time'] = df.start_str.apply(lambda x: parse('%s %s EST' % (date, x)))
if df.shape[0] > 1:
if df.start_int[0] > df.start_int[1]:
df.ix[0, 'start_time'] = parse('%s %s EST' % (last_date, df.start_str[0]))
df['end_time'] = df.loc[1:, 'start_time'].reset_index(drop=True)
else:
df['end_time'] = pd.NaT
df['channel'] = channel
tmp_dfs.append(df)
df = pd.concat(tmp_dfs)
df = df.sort_values(by=['start_time']).reset_index(drop=True)
idx = df[df.end_time.isnull()].index
nidx = idx + 1
df.loc[df.index.isin(idx[:-1]), 'end_time'] = df[df.index.isin(nidx)].start_time.values
df = df[df.end_time.notnull()]
dataframes.append(df)
df = pd.concat(dataframes)
df = df[[
'channel', 'start_time', 'end_time', 'title', 'imdb_title', 'imdb_url', 'ep_title', 'ep_url'
]].reset_index(drop=True)
return df
def get_bad_channels(available_dates, bad_channels):
dataframes = []
for date in available_dates:
for start_time in ['%04d' % (x * 100) for x in range(0, 24, 3)]:
print(date, start_time)
time_prog_list = get_time_from_grid(
date=date, start_time=start_time, channels=bad_channels)
df_ = pd.DataFrame(time_prog_list)
dataframes.append(df_)
df_ = pd.concat(dataframes)
df_ = df_[['channel', 'start_time', 'title', 'imdb_title', 'imdb_url', 'ep_title',
'ep_url']].sort_values(by=['channel', 'start_time']).reset_index(drop=True)
dataframes = []
for channel in df_.channel.unique():
tmp_df = df_[df_.channel == channel].reset_index(drop=True)
if tmp_df.shape[0] > 1:
tmp_df['end_time'] = tmp_df.loc[1:, 'start_time'].reset_index(drop=True)
else:
            tmp_df['end_time'] = pd.NaT
dataframes.append(tmp_df)
df_ = | pd.concat(dataframes) | pandas.concat |
import sys
import numpy as np
import faiss
import math
import argparse
import pandas as pd
class Initializer:
def __init__(self, args):
self.args = args
self.long_long_size = 8
self.long_size = 4
self.int_size = 2
self.output_file_binary_components = args.binary_components
self.input_file_items_ids = args.items_ids
if self.output_file_binary_components is None:
print('One must provide [--binary_components], [--encoded_items] or [--index_file].'
+ '\r\n' + 'Please use [-h] or [--help] for more information.')
sys.exit(1)
self.input_file_index = args.index_file
self.input_file_encoded_items = args.encoded_items
if self.input_file_encoded_items is None and self.input_file_index is None:
print('One must provide [--encoded_items] or [--index_file].'
+ '\r\n' + 'Please use [-h] or [--help] for more information.')
sys.exit(1)
self.component_type_size = args.component_type_size
        if self.component_type_size == self.long_long_size:
            self.data_type = np.ulonglong
        elif self.component_type_size == self.long_size:
            self.data_type = np.uint32
        elif self.component_type_size == self.int_size:
            self.data_type = np.uint16
else:
self.component_type_size = self.long_long_size
self.data_type = np.ulonglong
if self.input_file_encoded_items is not None:
self.encoded_items_vec = np.load(self.input_file_encoded_items)
else:
self.encoded_items_vec = None
self.input_file_training_set = args.training_set
if self.input_file_training_set is None:
self.training_set_vec = self.encoded_items_vec
else:
self.training_set_vec = np.load(self.input_file_training_set)
self.number_of_bits = args.number_of_bits
if self.input_file_index is None:
_, self.dimensions = self.encoded_items_vec.shape
if self.number_of_bits is None:
self.number_of_bits = self.dimensions * 2
self.lsh_index = self.get_lsh_index()
else:
self.lsh_index = faiss.read_index(self.input_file_index)
self.dimensions = None
def get_lsh_index(self):
index = faiss.IndexLSH(self.dimensions, self.number_of_bits)
index.train(self.training_set_vec)
index.add(self.encoded_items_vec)
return index
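# Hedged note (editor addition): the IndexLSH built above can also be queried
# with faiss's standard search API, e.g. for a hypothetical float32 query
# batch `q` of shape (n, dimensions):
#   distances, ids = index.search(q, 10)
# For IndexLSH the returned distances are Hamming distances between the
# binary codes.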
class Binarizer:
def __init__(self, initializer):
self.initializer = initializer
def __get_binary_components(self):
print('Type size: ' + str(self.initializer.component_type_size) + ' bytes.')
bytes_per_vector = self.initializer.lsh_index.bytes_per_vec
print('Bytes per vector: ' + str(bytes_per_vector))
print('Number of bits: ' + str(bytes_per_vector * 8))
vectors_bytes = faiss.vector_to_array(self.initializer.lsh_index.codes)
n_components = math.ceil(bytes_per_vector / self.initializer.component_type_size)
print('Number of binary components per vector: ' + str(n_components))
n = int(len(vectors_bytes) / bytes_per_vector)
resulting_components = np.zeros((n, n_components), dtype=self.initializer.data_type)
vector_start = 0
vector_stop = bytes_per_vector
vector_count = 0
while vector_count < n:
binary_vector = np.zeros((n_components,), dtype=self.initializer.data_type)
vector_bytes = vectors_bytes[vector_start:vector_stop]
component_start = 0
component_stop = self.initializer.component_type_size
component_count = 0
while component_count < n_components:
numerical_component_bytes = vector_bytes[component_start:component_stop]
numerical_component = int.from_bytes(numerical_component_bytes, byteorder='big', signed=False)
binary_vector[component_count] = numerical_component
component_start = component_stop
component_stop = component_stop + self.initializer.component_type_size
component_count += 1
resulting_components[vector_count] = binary_vector
vector_start = vector_stop
vector_stop = vector_stop + bytes_per_vector
vector_count += 1
print('Total items: ' + str(len(resulting_components)))
return resulting_components
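    # Worked illustration (editor addition) of the big-endian packing above,
    # assuming 8-byte components: a trailing byte pair b'\x01\x02' contributes
    #   int.from_bytes(b'\x00\x00\x00\x00\x00\x00\x01\x02', byteorder='big') == 258
    # so every LSH code of `bytes_per_vector` bytes is split into
    # ceil(bytes_per_vector / component_type_size) unsigned integer components.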
def __zip_with_ids(self, components):
ids = np.load(self.initializer.input_file_items_ids)
id_series = pd.Series(ids, name='item_id')
bin_vec_series = pd.Series(components.tolist(), name='binary_vector')
frame = {'item_id': id_series, 'binary_vector': bin_vec_series}
df = | pd.DataFrame(frame) | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
import psycopg2 as sql
import pandas as pd
db = sql.connect(
database='IMDb',
user='username',
password = 'password'
)
c = db.cursor()
def media_scores(q_tvEpisode, q_short, q_movie, q_video, q_tvMovie, q_tvSeries, whichplot='overall'):
c.execute(q_tvEpisode)
rows = c.fetchall()
tv_Episode_data = pd.DataFrame(rows, columns=['year_produced', 'Average_Score', 'st_dev_score'])
c.execute(q_short)
rows = c.fetchall()
short_data = | pd.DataFrame(rows, columns=['year_produced', 'Average_Score', 'st_dev_score']) | pandas.DataFrame |
import numpy as np
from numpy.random import seed
seed(1)
import pandas as pd
from math import sqrt
from sklearn.decomposition import PCA
######################################################################
# METRICS
######################################################################
def mse(y, y_hat):
"""
Calculates Mean Squared Error.
MSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: MSE
"""
mse = np.mean(np.square(y - y_hat))
return mse
def rmse(y, y_hat):
"""
Calculates Root Mean Squared Error.
RMSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
Finally the RMSE will be in the same scale
as the original time series so its comparison with other
series is possible only if they share a common scale.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: RMSE
"""
rmse = sqrt(np.mean(np.square(y - y_hat)))
return rmse
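# Worked example (editor addition) with hypothetical arrays:
#   y, y_hat = np.array([1., 2., 3.]), np.array([1., 3., 5.])
#   mse(y, y_hat)   # mean of [0, 1, 4]  -> 1.666...
#   rmse(y, y_hat)  # sqrt(1.666...)     -> ~1.291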
def mape(y, y_hat):
"""
Calculates Mean Absolute Percentage Error.
MAPE measures the relative prediction accuracy of a
    forecasting method by calculating the percentage deviation
    of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: MAPE
"""
mape = np.mean(np.abs(y - y_hat) / np.abs(y))
mape = 100 * mape
return mape
def smape(y, y_hat):
"""
Calculates Symmetric Mean Absolute Percentage Error.
SMAPE measures the relative prediction accuracy of a
forecasting method by calculating the relative deviation
of the prediction and the true value scaled by the sum of the
absolute values for the prediction and true value at a
given time, then averages these devations over the length
of the series. This allows the SMAPE to have bounds between
0% and 200% which is desireble compared to normal MAPE that
may be undetermined.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: SMAPE
"""
smape = np.mean(np.abs(y - y_hat) / (np.abs(y) + np.abs(y_hat)))
smape = 200 * smape
return smape
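# Worked example (editor addition): SMAPE is bounded in [0, 200].
#   smape(np.array([100.]), np.array([110.]))  # 200 * 10/210 -> ~9.52
#   smape(np.array([10.]), np.array([0.]))     # 200 * 10/10  -> 200.0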
def mase(y, y_hat, y_train, seasonality=1):
"""
Calculates the M4 Mean Absolute Scaled Error.
MASE measures the relative prediction accuracy of a
    forecasting method by comparing the mean absolute errors
of the prediction and the true value against the mean
absolute errors of the seasonal naive model.
y: numpy array
actual test values
y_hat: numpy array
predicted values
y_train: numpy array
actual train values for Naive1 predictions
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
return: MASE
"""
scale = np.mean(abs(y_train[seasonality:] - y_train[:-seasonality]))
mase = np.mean(abs(y - y_hat)) / scale
mase = 100 * mase
return mase
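# Worked example (editor addition) with seasonality=1 (naive-1 scaling):
#   y_train = np.array([10., 12., 14., 16.])  # naive-1 abs diffs: 2, 2, 2 -> scale = 2
#   mase(np.array([18., 20.]), np.array([17., 21.]), y_train)  # 100 * 1.0 / 2 -> 50.0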
def rmsse(y, y_hat, y_train, seasonality=1):
"""
    Calculates the M5 Root Mean Squared Scaled Error.
    RMSSE measures the relative prediction accuracy of a
    forecasting method by comparing the mean squared errors
    of the prediction and the true value against the mean
    squared errors of the seasonal naive model.
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
return: RMSSE
"""
scale = np.mean(np.square(y_train[seasonality:] - y_train[:-seasonality]))
rmsse = sqrt(mse(y, y_hat) / scale)
rmsse = 100 * rmsse
return rmsse
def pinball_loss(y, y_hat, tau=0.5):
"""
Calculates the Pinball Loss.
The Pinball loss measures the deviation of a quantile forecast.
By weighting the absolute deviation in a non symmetric way, the
loss pays more attention to under or over estimation.
A common value for tau is 0.5 for the deviation from the median.
y: numpy array
actual test values
y_hat: numpy array of len h (forecasting horizon)
predicted values
tau: float
Fixes the quantile against which the predictions are compared.
return: pinball_loss
"""
delta_y = y - y_hat
pinball = np.maximum(tau * delta_y, (tau-1) * delta_y)
pinball = pinball.mean()
    return pinball
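# Worked example (editor addition): with tau=0.9, under-forecasting is
# penalized nine times more heavily than over-forecasting.
#   pinball_loss(np.array([10.]), np.array([8.]), tau=0.9)   # max(1.8, -0.2) -> 1.8
#   pinball_loss(np.array([10.]), np.array([12.]), tau=0.9)  # max(-1.8, 0.2) -> 0.2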
def evaluate_panel(y_test, y_hat, y_train,
metric, seasonality):
"""
Calculates a specific metric for y and y_hat
y_test: pandas df
df with columns unique_id, ds, y
y_hat: pandas df
df with columns unique_id, ds, y_hat
y_train: pandas df
df with columns unique_id, ds, y (train)
this is used in the scaled metrics
seasonality: int
main frequency of the time series
Hourly 24, Daily 7, Weekly 52,
Monthly 12, Quarterly 4, Yearly 1
return: list of metric evaluations for each unique_id
in the panel data
"""
metric_name = metric.__code__.co_name
uids = y_test.index.get_level_values('unique_id').unique()
y_hat_uids = y_hat.index.get_level_values('unique_id').unique()
    assert len(y_test) == len(y_hat), "not same length"
assert all(uids == y_hat_uids), "not same u_ids"
idxs, evaluations = [], []
for uid in uids:
y_test_uid = y_test.loc[uid].values
y_hat_uid = y_hat.loc[uid].values
y_train_uid = y_train.loc[uid].y.values
if metric_name in ['mase', 'rmsse']:
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid,
y_train=y_train_uid, seasonality=seasonality)
else:
evaluation_uid = metric(y=y_test_uid, y_hat=y_hat_uid)
idxs.append(uid)
evaluations.append(evaluation_uid)
idxs = pd.Index(idxs, name='unique_id')
evaluations = | pd.Series(evaluations, index=idxs) | pandas.Series |
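# Hedged usage sketch (editor addition) for evaluate_panel; the panel below is
# hypothetical and indexed by 'unique_id', matching how the function uses .loc:
#   panel = pd.DataFrame({'unique_id': ['a', 'a', 'b', 'b'],
#                         'ds': pd.to_datetime(['2020-01-01', '2020-01-02'] * 2),
#                         'y': [1., 2., 3., 4.]}).set_index('unique_id')
#   y_hat = pd.Series([1.5, 2.5, 3.5, 4.5], index=panel.index, name='y_hat')
#   evaluate_panel(y_test=panel['y'], y_hat=y_hat,
#                  y_train=panel, metric=mse, seasonality=1)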
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import pkgutil
from datetime import datetime
from typing import cast, List
from unittest import TestCase
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import pytz
from dateutil import parser
from dateutil.relativedelta import relativedelta
from kats.compat.pandas import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from kats.consts import (
DEFAULT_TIME_NAME,
DEFAULT_VALUE_NAME,
TimeSeriesData,
TSIterator,
)
def load_data(file_name: str) -> pd.DataFrame:
ROOT = "kats"
if "kats" in os.getcwd().lower():
path = "data/"
else:
path = "kats/data/"
data_object = pkgutil.get_data(ROOT, path + file_name)
# pyre-fixme[6]: For 1st param expected `bytes` but got `Optional[bytes]`.
return pd.read_csv(io.BytesIO(data_object), encoding="utf8")
TIME_COL_NAME = "ds"
VALUE_COL_NAME = "y"
MULTIVAR_VALUE_DF_COLS: List[str] = [VALUE_COL_NAME, VALUE_COL_NAME + "_1"]
EMPTY_DF = pd.DataFrame()
EMPTY_TIME_SERIES = pd.Series([], name=DEFAULT_TIME_NAME, dtype=float)
EMPTY_VALUE_SERIES = pd.Series([], name=DEFAULT_VALUE_NAME, dtype=float)
EMPTY_VALUE_SERIES_NO_NAME = pd.Series([], dtype=float)
EMPTY_TIME_DATETIME_INDEX = pd.DatetimeIndex(pd.Series([], dtype=object))
EMPTY_DF_WITH_COLS: pd.DataFrame = pd.concat([EMPTY_TIME_SERIES, EMPTY_VALUE_SERIES], axis=1)
NUM_YEARS_OFFSET = 12
class TimeSeriesBaseTest(TestCase):
def setUp(self) -> None:
# load Dataframes for testing
self.AIR_DF = load_data("air_passengers.csv")
self.AIR_DF_DATETIME = self.AIR_DF.copy(deep=True)
self.AIR_DF_DATETIME.ds = self.AIR_DF_DATETIME.ds.apply(
lambda x: parser.parse(x)
)
self.AIR_DF_UNIXTIME = self.AIR_DF.copy(deep=True)
self.AIR_DF_UNIXTIME.ds = self.AIR_DF_DATETIME.ds.apply(
lambda x: (x - datetime(1970, 1, 1)).total_seconds()
)
self.AIR_DF_WITH_DEFAULT_NAMES = self.AIR_DF.copy(deep=True)
self.AIR_DF_WITH_DEFAULT_NAMES.columns = [DEFAULT_TIME_NAME, DEFAULT_VALUE_NAME]
self.MULTIVAR_AIR_DF = self.AIR_DF.copy(deep=True)
self.MULTIVAR_AIR_DF[VALUE_COL_NAME + "_1"] = self.MULTIVAR_AIR_DF.y * 2
self.MULTIVAR_AIR_DF_DATETIME = self.MULTIVAR_AIR_DF.copy(deep=True)
self.MULTIVAR_AIR_DF_DATETIME.ds = self.MULTIVAR_AIR_DF_DATETIME.ds.apply(
lambda x: parser.parse(x)
)
self.MULTIVAR_VALUE_DF = self.MULTIVAR_AIR_DF[MULTIVAR_VALUE_DF_COLS]
self.AIR_TIME_SERIES = self.AIR_DF.ds
self.AIR_TIME_SERIES_PD_DATETIME = pd.to_datetime(self.AIR_TIME_SERIES)
self.AIR_TIME_SERIES_UNIXTIME = self.AIR_TIME_SERIES_PD_DATETIME.apply(
lambda x: (x - datetime(1970, 1, 1)).total_seconds()
)
self.AIR_VALUE_SERIES = self.AIR_DF[VALUE_COL_NAME]
self.AIR_TIME_DATETIME_INDEX = pd.DatetimeIndex(self.AIR_TIME_SERIES)
class TimeSeriesDataInitTest(TimeSeriesBaseTest):
def setUp(self) -> None:
super(TimeSeriesDataInitTest, self).setUp()
# Univariate TimeSeriesData initialized from a pd.DataFrame
self.ts_from_df = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
# Univariate TimeSeriesData initialized from a pd.DataFrame with time
# as a datetime.datetime object
self.ts_from_df_datetime = TimeSeriesData(
df=self.AIR_DF_DATETIME, time_col_name=TIME_COL_NAME
)
# Univariate TimeSeriesData initialized from a pd.DataFrame with time
# as unix time
self.ts_from_df_with_unix = TimeSeriesData(
df=self.AIR_DF_UNIXTIME,
use_unix_time=True,
unix_time_units="s",
time_col_name=TIME_COL_NAME,
)
# Multivariate TimeSeriesData initialized from a pd.DataFrame
self.ts_from_df_multi = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
# Multivariate TimeSeriesData initialized from a pd.DataFrame with time
# as a datetime.datetime object
self.ts_from_df_multi_datetime = TimeSeriesData(
df=self.MULTIVAR_AIR_DF_DATETIME, time_col_name=TIME_COL_NAME
)
# Univariate TimeSeriesData initialized from two pd.Series with time
# as a string
self.ts_from_series_univar_no_datetime = TimeSeriesData(
time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES
)
# Univariate TimeSeriesData initialized from two pd.Series with time
# as a pd.Timestamp
self.ts_from_series_univar_with_datetime = TimeSeriesData(
time=self.AIR_TIME_SERIES_PD_DATETIME, value=self.AIR_VALUE_SERIES
)
# Univariate TimeSeriesData initialized from two pd.Series with time
# as unix time
self.ts_from_series_with_unix = TimeSeriesData(
time=self.AIR_TIME_SERIES_UNIXTIME,
value=self.AIR_VALUE_SERIES,
use_unix_time=True,
unix_time_units="s",
time_col_name=TIME_COL_NAME,
)
# Univariate TimeSeriesData initialized with time as a pd.Series and
# value as a pd.DataFrame
self.ts_from_series_and_df_univar = TimeSeriesData(
time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES.to_frame()
)
# Multivariate TimeSeriesData initialized from a pd.Series for time
# and DataFrame for value
self.ts_from_series_and_df_multivar = TimeSeriesData(
time=self.AIR_TIME_SERIES, value=self.MULTIVAR_VALUE_DF
)
# Univariate TimeSeriesData initialized with time as a pd.DateTimeIndex
# and value as a pd.Series
self.ts_from_index_and_series_univar = TimeSeriesData(
time=self.AIR_TIME_DATETIME_INDEX,
value=self.AIR_VALUE_SERIES,
time_col_name=TIME_COL_NAME,
)
# Multivariate TimeSeriesData initialized with time as a
# pd.DateTimeIndex and value as a pd.DataFrame
self.ts_from_index_and_series_multivar = TimeSeriesData(
time=self.AIR_TIME_DATETIME_INDEX,
value=self.MULTIVAR_VALUE_DF,
time_col_name=TIME_COL_NAME,
)
# TimeSeriesData initialized from None Objects
self.ts_df_none = TimeSeriesData(df=None)
self.ts_time_none_and_value_none = TimeSeriesData(time=None, value=None)
# TimeSeriesData initialized from Empty Objects
self.ts_df_empty = TimeSeriesData(df=EMPTY_DF)
self.ts_time_empty_value_empty = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_VALUE_SERIES
)
self.ts_time_empty_value_empty_no_name = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_VALUE_SERIES_NO_NAME
)
self.ts_time_empty_value_empty_df = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_DF
)
self.ts_time_empty_value_empty_df_with_cols = TimeSeriesData(
time=EMPTY_TIME_SERIES, value=EMPTY_DF_WITH_COLS
)
# univariate data with missing time
self.ts_univariate_missing = TimeSeriesData(
df=pd.DataFrame(
{
"time": ["2010-01-01", "2010-01-02", "2010-01-03", "2010-01-05"],
"value": [1, 2, 3, 4],
}
)
)
# multivariate data with missing time
self.ts_multi_missing = TimeSeriesData(
df=pd.DataFrame(
{
"time": ["2010-01-01", "2010-01-02", "2010-01-03", "2010-01-05"],
"value1": [1, 2, 3, 4],
"value2": [4, 3, 2, 1],
}
)
)
# univariate data with unixtime in US/Pacific with time zone
self.unix_list = (
(
pd.date_range(
"2020-03-01", "2020-03-10", tz="US/Pacific", freq="1d"
).astype(int)
/ 1e9
)
.astype(int)
.to_list()
)
self.ts_univar_PST_tz = TimeSeriesData(
df=pd.DataFrame({"time": self.unix_list, "value": [0] * 10}),
use_unix_time=True,
unix_time_units="s",
tz="US/Pacific",
)
# univariate data with unixtime in US/Pacific without time zone
self.ts_univar_PST = TimeSeriesData(
df=pd.DataFrame({"time": self.unix_list, "value": [0] * 10}),
use_unix_time=True,
unix_time_units="s",
)
# univariate data with date str with tz
date = ["2020-10-31", "2020-11-01", "2020-11-02"]
self.ts_univar_str_date_tz = TimeSeriesData(
df=pd.DataFrame({"time": date, "value": [0] * 3}),
date_format="%Y-%m-%d",
tz="US/Pacific",
)
# univariate data with date str without tz
self.ts_univar_str_date = TimeSeriesData(
df=pd.DataFrame({"time": date, "value": [0] * 3}),
date_format="%Y-%m-%d",
)
# univariate data in US/Pacific Time Zone with missing data
self.ts_univar_PST_missing_tz = TimeSeriesData(
df=pd.DataFrame(
{"time": (self.unix_list[0:4] + self.unix_list[7:10]), "value": [0] * 7}
),
use_unix_time=True,
unix_time_units="s",
tz="US/Pacific",
)
# Testing univariate time series intialized from a DataFrame
def test_init_from_df_univar(self) -> None:
# DataFrame with string time
assert_series_equal(self.ts_from_df.time, self.AIR_TIME_SERIES_PD_DATETIME)
assert_series_equal(
cast(pd.Series, self.ts_from_df.value), self.AIR_VALUE_SERIES
)
# DataFrame with datetime time
assert_series_equal(
self.ts_from_df_datetime.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_df_datetime.value), self.AIR_VALUE_SERIES
)
# DataFrame with unix time
assert_series_equal(
self.ts_from_df_with_unix.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_df_with_unix.value), self.AIR_VALUE_SERIES
)
# Testing multivariate time series initialized from a DataFrame
def test_init_from_df_multi(self) -> None:
assert_series_equal(
self.ts_from_df_multi.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_frame_equal(
cast(pd.DataFrame, self.ts_from_df_multi.value), self.MULTIVAR_VALUE_DF
)
# Testing univariate time series initialized from a Series and Series/DataFrame
def test_init_from_series_univar(self) -> None:
# time and value from Series, with time as string
assert_series_equal(
self.ts_from_series_univar_no_datetime.time,
self.AIR_TIME_SERIES_PD_DATETIME,
)
# time and value from Series, with time as pd.Timestamp
assert_series_equal(
self.ts_from_series_univar_with_datetime.time,
self.AIR_TIME_SERIES_PD_DATETIME,
)
assert_series_equal(
cast(pd.Series, self.ts_from_series_univar_no_datetime.value),
self.AIR_VALUE_SERIES,
)
# time and value from Series, with time out of order and `sort_by_time=True`
unsorted_df = self.AIR_DF.sample(frac=1)
resorted_ts = TimeSeriesData(
time=unsorted_df.ds,
value=unsorted_df.y,
time_col_name=TIME_COL_NAME,
sort_by_time=True,
)
self.assertEqual(resorted_ts, self.ts_from_df)
# time and value from Series, with time as unix time
assert_series_equal(
self.ts_from_series_with_unix.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_series_with_unix.value), self.AIR_VALUE_SERIES
)
# time from Series and value from DataFrame
assert_series_equal(
self.ts_from_series_and_df_univar.time, self.AIR_TIME_SERIES_PD_DATETIME
)
print(type(self.ts_from_series_and_df_univar.value))
assert_series_equal(
cast(pd.Series, self.ts_from_series_and_df_univar.value),
self.AIR_VALUE_SERIES,
)
# Testing multivariate time series initialized from a Series/DataFrame
def test_init_from_series_multivar(self) -> None:
        # Testing multivariate time series initialized from a time Series and value DataFrame
assert_series_equal(
self.ts_from_series_and_df_multivar.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_frame_equal(
cast(pd.DataFrame, self.ts_from_series_and_df_multivar.value),
self.MULTIVAR_VALUE_DF,
)
# Testing univariate time series with time initialized as a
# pd.DateTimeIndex
def test_init_from_index_univar(self) -> None:
assert_series_equal(
self.ts_from_index_and_series_univar.time, self.AIR_TIME_SERIES_PD_DATETIME
)
assert_series_equal(
cast(pd.Series, self.ts_from_index_and_series_univar.value),
self.AIR_VALUE_SERIES,
)
# Testing multivariate time series with time initialized as a
# pd.DateTimeIndex
def test_init_from_index_multivar(self) -> None:
assert_series_equal(
self.ts_from_index_and_series_multivar.time,
self.AIR_TIME_SERIES_PD_DATETIME,
)
assert_frame_equal(
cast(pd.DataFrame, self.ts_from_index_and_series_multivar.value),
self.MULTIVAR_VALUE_DF,
)
# Testing initialization from None Objects
def test_none(self) -> None:
# Testing initialization from None DataFrame
assert_series_equal(self.ts_df_none.time, EMPTY_TIME_SERIES)
assert_series_equal(cast(pd.Series, self.ts_df_none.value), EMPTY_VALUE_SERIES)
# Testing initialization from two None Series
assert_series_equal(self.ts_time_none_and_value_none.time, EMPTY_TIME_SERIES)
assert_series_equal(
cast(pd.Series, self.ts_time_none_and_value_none.value), EMPTY_VALUE_SERIES
)
# Testing initialization from Empty Objects
def test_empty(self) -> None:
        # Testing initialization from empty DataFrame
assert_series_equal(self.ts_df_empty.time, EMPTY_TIME_SERIES)
assert_series_equal(cast(pd.Series, self.ts_df_empty.value), EMPTY_VALUE_SERIES)
        # Testing initialization from two empty Series
assert_series_equal(self.ts_time_empty_value_empty.time, EMPTY_TIME_SERIES)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty.value), EMPTY_VALUE_SERIES
)
        # Testing initialization from two empty unnamed Series
assert_series_equal(
self.ts_time_empty_value_empty_no_name.time, EMPTY_TIME_SERIES
)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty_no_name.value),
EMPTY_VALUE_SERIES,
)
# Make sure the time and value objects here have the default names
self.assertEqual(
self.ts_time_empty_value_empty_no_name.time.name, DEFAULT_TIME_NAME
)
self.assertEqual(
self.ts_time_empty_value_empty_no_name.value.name, DEFAULT_VALUE_NAME
)
# Testing initialization from time as empty Series and value as empty
# DataFrame
assert_series_equal(self.ts_time_empty_value_empty_df.time, EMPTY_TIME_SERIES)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty_df.value), EMPTY_VALUE_SERIES
)
# Testing initialization from time as empty Series and value as empty
# DataFrame
assert_series_equal(
self.ts_time_empty_value_empty_df_with_cols.time, EMPTY_TIME_SERIES
)
assert_series_equal(
cast(pd.Series, self.ts_time_empty_value_empty_df_with_cols.value),
EMPTY_VALUE_SERIES,
)
# Testing incorrect initializations
def test_incorrect_init_types(self) -> None:
# Incorrect initialization with DF
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Optional[pd.core.frame.DataFrame]` for 1st
# param but got `List[Variable[_T]]`.
TimeSeriesData(df=[])
# Incorrect initialization with value
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=None)
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Union[None, pd.core.frame.DataFrame,
# pd.core.series.Series]` for 2nd param but got `List[Variable[_T]]`.
TimeSeriesData(time=self.AIR_TIME_SERIES, value=[])
# Incorrect initialization with time
with self.assertRaises(ValueError):
TimeSeriesData(time=None, value=self.AIR_VALUE_SERIES)
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Union[None,
# pd.core.indexes.datetimes.DatetimeIndex, pd.core.series.Series]` for 1st
# param but got `List[Variable[_T]]`.
TimeSeriesData(time=[], value=self.AIR_VALUE_SERIES)
# Incorrect initialization with time and value
with self.assertRaises(ValueError):
# pyre-fixme[6]: Expected `Union[None,
# pd.core.indexes.datetimes.DatetimeIndex, pd.core.series.Series]` for 1st
# param but got `List[Variable[_T]]`.
TimeSeriesData(time=[], value=[])
# Incorrect initialization with value dtypes
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES.map(str))
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.MULTIVAR_VALUE_DF.applymap(str))
# Testing incorrect initializations
def test_incorrect_init_lengths(self) -> None:
# Incorrect initialization with different length time and values
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.AIR_VALUE_SERIES[:-1])
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES[:-1], value=self.AIR_VALUE_SERIES)
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES, value=self.MULTIVAR_VALUE_DF[:-1])
with self.assertRaises(ValueError):
TimeSeriesData(time=self.AIR_TIME_SERIES[:-1], value=self.MULTIVAR_VALUE_DF)
# Testing DataFrame conversion
def test_to_dataframe(self) -> None:
# Univariate case
assert_frame_equal(self.ts_from_df.to_dataframe(), self.AIR_DF_DATETIME)
# Multivariate case
assert_frame_equal(
self.ts_from_df_multi_datetime.to_dataframe(), self.MULTIVAR_AIR_DF_DATETIME
)
# Series Cases
assert_frame_equal(
self.ts_from_series_univar_no_datetime.to_dataframe(), self.AIR_DF_DATETIME
)
assert_frame_equal(
self.ts_from_series_univar_with_datetime.to_dataframe(),
self.AIR_DF_DATETIME,
)
# Series/DataFrame Cases
assert_frame_equal(
self.ts_from_series_and_df_univar.to_dataframe(), self.AIR_DF_DATETIME
)
assert_frame_equal(
self.ts_from_series_and_df_multivar.to_dataframe(),
self.MULTIVAR_AIR_DF_DATETIME,
)
# Empty/None Cases
assert_frame_equal(self.ts_df_none.to_dataframe(), EMPTY_DF_WITH_COLS)
assert_frame_equal(
self.ts_time_none_and_value_none.to_dataframe(), EMPTY_DF_WITH_COLS
)
assert_frame_equal(self.ts_df_empty.to_dataframe(), EMPTY_DF_WITH_COLS)
assert_frame_equal(
self.ts_time_empty_value_empty.to_dataframe(), EMPTY_DF_WITH_COLS
)
assert_frame_equal(
self.ts_time_empty_value_empty_df.to_dataframe(), EMPTY_DF_WITH_COLS
)
# Testing Data Interpolate
def test_interpolate(self) -> None:
# univariate
self.assertEqual(
self.ts_univariate_missing.interpolate(freq="D", method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 3.5, 4],
}
)
),
)
self.assertEqual(
self.ts_univariate_missing.interpolate(freq="D", method="ffill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 3, 4],
}
)
),
)
self.assertEqual(
self.ts_univariate_missing.interpolate(freq="D", method="bfill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 4, 4],
}
)
),
)
# multivariate
self.assertEqual(
self.ts_multi_missing.interpolate(freq="D", method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 3.5, 4],
"value2": [4, 3, 2, 1.5, 1],
}
)
),
)
self.assertEqual(
self.ts_multi_missing.interpolate(freq="D", method="ffill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 3, 4],
"value2": [4, 3, 2, 2, 1],
}
)
),
)
self.assertEqual(
self.ts_multi_missing.interpolate(freq="D", method="bfill"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 4, 4],
"value2": [4, 3, 2, 1, 1],
}
)
),
)
# test with no frequency given univariate
self.assertEqual(
self.ts_univariate_missing.interpolate(method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value": [1, 2, 3, 3.5, 4],
}
)
),
)
# no frequency given, for multivariate
self.assertEqual(
self.ts_multi_missing.interpolate(method="linear"),
TimeSeriesData(
pd.DataFrame(
{
"time": [
"2010-01-01",
"2010-01-02",
"2010-01-03",
"2010-01-04",
"2010-01-05",
],
"value1": [1, 2, 3, 3.5, 4],
"value2": [4, 3, 2, 1.5, 1],
}
)
),
)
def test_to_array(self) -> None:
# Univariate case
np.testing.assert_array_equal(
self.ts_from_df.to_array(), self.AIR_DF_DATETIME.to_numpy()
)
# Multivariate case
np.testing.assert_array_equal(
self.ts_from_df_multi_datetime.to_array(),
self.MULTIVAR_AIR_DF_DATETIME.to_numpy(),
)
# Series Cases
np.testing.assert_array_equal(
self.ts_from_series_univar_no_datetime.to_array(),
self.AIR_DF_DATETIME.to_numpy(),
)
np.testing.assert_array_equal(
self.ts_from_series_univar_with_datetime.to_array(),
self.AIR_DF_DATETIME.to_numpy(),
)
# Series/DataFrame Cases
np.testing.assert_array_equal(
self.ts_from_series_and_df_univar.to_array(),
self.AIR_DF_DATETIME.to_numpy(),
)
np.testing.assert_array_equal(
self.ts_from_series_and_df_multivar.to_array(),
self.MULTIVAR_AIR_DF_DATETIME.to_numpy(),
)
# Empty/None Cases
np.testing.assert_array_equal(self.ts_df_none.to_array(), np.empty)
np.testing.assert_array_equal(
self.ts_time_none_and_value_none.to_array(), np.empty
)
np.testing.assert_array_equal(self.ts_df_empty.to_array(), np.empty)
np.testing.assert_array_equal(
self.ts_time_empty_value_empty.to_array(), np.empty
)
np.testing.assert_array_equal(
self.ts_time_empty_value_empty_df.to_array(), np.empty
)
def test_tz(self) -> None:
self.ts_univar_PST_tz.validate_data(
validate_frequency=True, validate_dimension=True
)
self.assertEqual(self.ts_univar_PST_tz.freq_to_timedelta(), pd.Timedelta("1d"))
self.assertEqual(self.ts_univar_PST_tz.tz(), pytz.timezone("US/Pacific"))
self.assertTrue(
(
np.array(self.unix_list)
== (self.ts_univar_PST_tz.time.values.astype(int) / 1e9).astype(int)
).all()
)
with self.assertRaisesRegex(
ValueError, "Only constant frequency is supported for time!"
):
self.ts_univar_PST.validate_data(
validate_frequency=True, validate_dimension=True
)
self.ts_univar_str_date.validate_data(
validate_frequency=True, validate_dimension=True
)
self.assertEqual(
self.ts_univar_str_date.freq_to_timedelta(), pd.Timedelta("1d")
)
self.ts_univar_str_date_tz.validate_data(
validate_frequency=True, validate_dimension=True
)
self.assertEqual(
self.ts_univar_str_date_tz.freq_to_timedelta(), pd.Timedelta("1d")
)
self.assertEqual(self.ts_univar_PST_tz.tz(), pytz.timezone("US/Pacific"))
# test ambiguous
tsd = TimeSeriesData(
df=pd.DataFrame(
{
"time": [
"2018-10-28 01:30:00",
"2018-10-28 02:00:00",
"2018-10-28 02:30:00",
"2018-10-28 02:00:00",
"2018-10-28 02:30:00",
"2018-10-28 03:00:00",
"2018-10-28 03:30:00",
],
"value": [0] * 7,
}
),
tz="CET",
tz_ambiguous="infer",
)
tsd.validate_data(validate_frequency=True, validate_dimension=True)
# test nonexistent
tsd = TimeSeriesData(
df=pd.DataFrame(
{
"time": [
"2020-03-08 02:00:00",
"2020-03-08 02:30:00",
"2020-03-08 03:00:00",
],
"value": [0] * 3,
}
),
tz="US/Pacific",
tz_nonexistent="shift_forward",
)
def test_infer_freq_robust(self) -> None:
self.assertEqual(
self.ts_univariate_missing.infer_freq_robust(),
pd.Timedelta(value=1, unit="D"),
)
self.assertEqual(
self.ts_univar_PST_missing_tz.infer_freq_robust(),
pd.Timedelta(value=1, unit="D"),
)
def test_is_data_missing(self) -> None:
self.assertEqual(self.ts_univariate_missing.is_data_missing(), True)
self.assertEqual(self.ts_univar_PST_missing_tz.is_data_missing(), True)
self.assertEqual(self.ts_from_series_and_df_univar.is_data_missing(), False)
self.assertEqual(self.ts_from_series_and_df_multivar.is_data_missing(), False)
def test_min_max_values(self) -> None:
# test min/max value for univariate
self.assertEqual(self.ts_from_df.min, np.nanmin(self.ts_from_df.value.values))
self.assertEqual(self.ts_from_df.max, np.nanmax(self.ts_from_df.value.values))
# test min/max value for multivariate
self.assertEqual(
# pyre-fixme[16]: `float` has no attribute `equals`.
self.ts_from_df_multi.min.equals(
self.ts_from_df_multi.value.min(skipna=True)
),
True,
)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
self.ts_from_df_multi.max.equals(
self.ts_from_df_multi.value.max(skipna=True)
),
True,
)
# test min/max value for empty TS
empty_ts = TimeSeriesData(pd.DataFrame())
self.assertEqual(np.isnan(empty_ts.min), True)
self.assertEqual(np.isnan(empty_ts.max), True)
# test if min/max changes if values are re-assigned for univariate
ts_from_df_new = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
new_val = np.random.randn(len(self.AIR_DF))
ts_from_df_new.value = pd.Series(new_val)
self.assertEqual(ts_from_df_new.min, np.min(new_val))
self.assertEqual(ts_from_df_new.max, np.max(new_val))
# test if min/max changes if values are re-assigned with NaNs for univariate
new_val[-1] = np.nan
ts_from_df_new.value = pd.Series(new_val)
self.assertEqual(ts_from_df_new.min, np.nanmin(new_val))
self.assertEqual(ts_from_df_new.max, np.nanmax(new_val))
# test min/max changes if values are re-assigned for multivariate
ts_from_df_multi_new = TimeSeriesData(
self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
new_val_multi = np.random.randn(
self.MULTIVAR_VALUE_DF.shape[0], self.MULTIVAR_VALUE_DF.shape[1] - 1
)
ts_from_df_multi_new.value = pd.DataFrame(new_val_multi)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.min.equals(pd.DataFrame(new_val_multi).min()),
True,
)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.max.equals(pd.DataFrame(new_val_multi).max()),
True,
)
# test min/max changes if values are re-assigned with NaNs for multivariate
new_val_multi[0] = np.nan
ts_from_df_multi_new.value = pd.DataFrame(new_val_multi)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.min.equals(
pd.DataFrame(new_val_multi).min(skipna=True)
),
True,
)
self.assertEqual(
# pyre-fixme[16]: Item `float` of `Union[float, Series]` has no
# attribute `equals`.
ts_from_df_multi_new.max.equals(
pd.DataFrame(new_val_multi).max(skipna=True)
),
True,
)
class TimeSeriesDataOpsTest(TimeSeriesBaseTest):
def setUp(self) -> None:
super(TimeSeriesDataOpsTest, self).setUp()
# Creating DataFrames
# DataFrame with date offset
transformed_df_date = self.AIR_DF_DATETIME.copy(deep=True)
transformed_df_date.ds = transformed_df_date.ds.apply(
lambda x: x + relativedelta(years=NUM_YEARS_OFFSET)
)
transformed_df_date_concat = self.AIR_DF.append(
transformed_df_date, ignore_index=True
)
transformed_df_date_double = self.AIR_DF_DATETIME.copy(deep=True)
transformed_df_date_double.ds = transformed_df_date.ds.apply(
lambda x: x + relativedelta(years=NUM_YEARS_OFFSET * 2)
)
transformed_df_date_concat_double = self.AIR_DF.append(
transformed_df_date_double, ignore_index=True
)
# DataFrames with value offset
transformed_df_value = self.AIR_DF.copy(deep=True)
transformed_df_value.y = transformed_df_value.y.apply(lambda x: x * 2)
transformed_df_value_inv = self.AIR_DF.copy(deep=True)
transformed_df_value_inv.y = transformed_df_value_inv.y.apply(lambda x: x * -1)
# DataFrame with date and value offset
transformed_df_date_and_value = transformed_df_date.copy(deep=True)
transformed_df_date_and_value.y = transformed_df_date_and_value.y.apply(
lambda x: x * 2
)
# DataFrame with date offset (multivariate)
transformed_df_date_multi = transformed_df_date.copy(deep=True)
transformed_df_date_multi[VALUE_COL_NAME + "_1"] = (
transformed_df_date_multi.y * 2
)
transformed_df_date_concat_multi = self.MULTIVAR_AIR_DF.append(
transformed_df_date_multi, ignore_index=True
)
transformed_df_date_concat_mixed = self.MULTIVAR_AIR_DF_DATETIME.append(
transformed_df_date
)
transformed_df_date_double_multi = transformed_df_date_double.copy(deep=True)
transformed_df_date_double_multi[VALUE_COL_NAME + "_1"] = (
transformed_df_date_double_multi.y * 2
)
transformed_df_date_concat_double_multi = self.MULTIVAR_AIR_DF.append(
transformed_df_date_double_multi, ignore_index=True
)
transformed_df_date_concat_double_mixed = self.MULTIVAR_AIR_DF_DATETIME.append(
transformed_df_date_double
)
# DataFrame with value offset (multivariate)
transformed_df_value_none_multi = self.MULTIVAR_AIR_DF.copy(deep=True)
transformed_df_value_none_multi.y = transformed_df_value_none_multi.y_1
transformed_df_value_none_multi.y_1 = np.nan
# DataFrame with date and value offset (multivariate)
transformed_df_date_and_value_multi = transformed_df_date_and_value.copy(
deep=True
)
transformed_df_date_and_value_multi[VALUE_COL_NAME + "_1"] = (
transformed_df_date_and_value_multi.y * 2
)
# DataFrame with all constant values
df_zeros = self.AIR_DF.copy(deep=True)
df_zeros.y.values[:] = 0
df_ones = self.AIR_DF.copy(deep=True)
df_ones.y.values[:] = 1
df_twos = df_ones.copy(deep=True)
df_twos.y.values[:] = 2
df_neg_ones = self.AIR_DF.copy(deep=True)
df_neg_ones.y.values[:] = -1
df_ones_multi = df_ones.copy(deep=True)
df_ones_multi[VALUE_COL_NAME + "_1"] = df_ones_multi.y * 2
# Creating TimeSeriesData objects
# Univariate TimeSeriesData initialized from a pd.DataFrame
self.ts_univ_1 = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
self.ts_univ_2 = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
self.ts_univ_default_names = TimeSeriesData(df=self.AIR_DF_WITH_DEFAULT_NAMES)
self.ts_univ_default_names_2 = TimeSeriesData(df=self.AIR_DF_WITH_DEFAULT_NAMES)
# Multivariate TimeSeriesData initialized from a pd.DataFrame
self.ts_multi_1 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_2 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date offset
self.ts_date_transform_univ = TimeSeriesData(
df=transformed_df_date, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_univ = TimeSeriesData(
df=transformed_df_date_concat, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_double_univ = TimeSeriesData(
df=transformed_df_date_double, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_double_univ = TimeSeriesData(
df=transformed_df_date_concat_double, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date offset (multivariate)
self.ts_date_transform_multi = TimeSeriesData(
df=transformed_df_date_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_multi = TimeSeriesData(
df=transformed_df_date_concat_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_mixed = TimeSeriesData(
df=transformed_df_date_concat_mixed, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_double_multi = TimeSeriesData(
df=transformed_df_date_double_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_double_multi = TimeSeriesData(
df=transformed_df_date_concat_double_multi, time_col_name=TIME_COL_NAME
)
self.ts_date_transform_concat_double_mixed = TimeSeriesData(
df=transformed_df_date_concat_double_mixed, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with value offset
self.ts_value_transform_univ = TimeSeriesData(
df=transformed_df_value, time_col_name=TIME_COL_NAME
)
self.ts_value_transform_inv_univ = TimeSeriesData(
df=transformed_df_value_inv, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with value offset (multivariate)
self.ts_value_transform_none_multi = TimeSeriesData(
df=transformed_df_value_none_multi, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date and value offset
self.ts_date_and_value_transform_univ = TimeSeriesData(
df=transformed_df_date_and_value, time_col_name=TIME_COL_NAME
)
# TimeSeriesData with date and value offset (multivariate)
self.ts_date_and_value_transform_multi = TimeSeriesData(
df=transformed_df_date_and_value_multi, time_col_name=TIME_COL_NAME
)
# TimeSeriesData object with all constant values
self.ts_zero = TimeSeriesData(df=df_zeros, time_col_name=TIME_COL_NAME)
self.ts_ones = TimeSeriesData(df=df_ones, time_col_name=TIME_COL_NAME)
self.ts_twos = TimeSeriesData(df=df_twos, time_col_name=TIME_COL_NAME)
self.ts_neg_ones = TimeSeriesData(df=df_neg_ones, time_col_name=TIME_COL_NAME)
self.ts_ones_multi = TimeSeriesData(
df=df_ones_multi, time_col_name=TIME_COL_NAME
)
# Empty TimeSeriesData Object
self.ts_empty = TimeSeriesData(df=EMPTY_DF)
self.ts_empty_with_cols = TimeSeriesData(
df=EMPTY_DF_WITH_COLS, time_col_name=TIME_COL_NAME
)
# Copies for Extended objects
self.ts_univ_extend = TimeSeriesData(
df=self.AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_univ_extend_2 = TimeSeriesData(
df=self.AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_univ_extend_err = TimeSeriesData(
df=self.AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_2 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_3 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_4 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_err = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_multi_extend_err_2 = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
self.ts_empty_extend = TimeSeriesData(df=EMPTY_DF)
self.ts_empty_extend_err = TimeSeriesData(df=EMPTY_DF)
# Other values
self.length = len(self.AIR_DF)
def test_eq(self) -> None:
# Univariate equality
self.assertTrue(self.ts_univ_1 == self.ts_univ_2)
# Multivariate equality
self.assertTrue(self.ts_multi_1 == self.ts_multi_2)
# Univariate inequality
self.assertFalse(self.ts_univ_1 == self.ts_date_transform_univ)
self.assertFalse(self.ts_univ_1 == self.ts_value_transform_univ)
self.assertFalse(self.ts_univ_1 == self.ts_date_and_value_transform_univ)
# Multivariate inequality
self.assertFalse(self.ts_multi_1 == self.ts_date_transform_multi)
self.assertFalse(self.ts_multi_1 == self.ts_value_transform_none_multi)
self.assertFalse(self.ts_multi_1 == self.ts_date_and_value_transform_multi)
# Univariate vs. Multivariate inequality
self.assertFalse(self.ts_univ_1 == self.ts_multi_1)
self.assertFalse(self.ts_multi_1 == self.ts_univ_1)
def test_ne(self) -> None:
# Univariate equality
self.assertFalse(self.ts_univ_1 != self.ts_univ_2)
# Multivariate equality
self.assertFalse(self.ts_multi_1 != self.ts_multi_2)
# Univariate inequality
self.assertTrue(self.ts_univ_1 != self.ts_date_transform_univ)
self.assertTrue(self.ts_univ_1 != self.ts_value_transform_univ)
self.assertTrue(self.ts_univ_1 != self.ts_date_and_value_transform_univ)
# Multivariate inequality
self.assertTrue(self.ts_multi_1 != self.ts_date_transform_multi)
self.assertTrue(self.ts_multi_1 != self.ts_value_transform_none_multi)
self.assertTrue(self.ts_multi_1 != self.ts_date_and_value_transform_multi)
# Univariate vs. Multivariate inequality
self.assertTrue(self.ts_univ_1 != self.ts_multi_1)
self.assertTrue(self.ts_multi_1 != self.ts_univ_1)
def test_add(self) -> None:
# Add same DataFrames
self.assertEqual(self.ts_univ_1 + self.ts_univ_2, self.ts_value_transform_univ)
# Add different DataFrames
self.assertEqual(
self.ts_univ_1 + self.ts_value_transform_inv_univ, self.ts_zero
)
# Add Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_univ_1 + self.ts_multi_1, self.ts_value_transform_none_multi
)
# Empty Case
self.assertEqual(self.ts_empty + self.ts_empty, self.ts_empty)
# Add DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 + self.ts_date_transform_univ
def test_sub(self) -> None:
# Subtract same DataFrames
self.assertEqual(self.ts_univ_1 - self.ts_univ_2, self.ts_zero)
# Subtract different DataFrames
self.assertEqual(
self.ts_univ_1 - self.ts_value_transform_inv_univ,
self.ts_value_transform_univ,
)
# Subtract Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_multi_1 - self.ts_value_transform_inv_univ,
self.ts_value_transform_none_multi,
)
# Empty Case
self.assertEqual(self.ts_empty - self.ts_empty, self.ts_empty)
# Subtract DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 - self.ts_date_transform_univ
def test_div(self) -> None:
# Divide same DataFrames
self.assertEqual(self.ts_univ_1 / self.ts_univ_2, self.ts_ones)
# Divide different DataFrames
self.assertEqual(
self.ts_univ_1 / self.ts_value_transform_inv_univ, self.ts_neg_ones
)
# Divide Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_value_transform_univ / self.ts_ones_multi,
self.ts_value_transform_none_multi,
)
# Empty Case
self.assertEqual(self.ts_empty / self.ts_empty, self.ts_empty)
# Divide DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 / self.ts_date_transform_univ
def test_mul(self) -> None:
# Multiply same DataFrames
self.assertEqual(self.ts_ones * self.ts_ones, self.ts_ones)
# Multiply different DataFrames
self.assertEqual(self.ts_univ_1 * self.ts_twos, self.ts_value_transform_univ)
# Multiply Univariate and Multivariate DataFrames
self.assertEqual(
self.ts_multi_1 * self.ts_twos, self.ts_value_transform_none_multi
)
# Empty Case
self.assertEqual(self.ts_empty * self.ts_empty, self.ts_empty)
# Multiply DataFrames with different dates
with self.assertRaises(ValueError):
self.ts_univ_1 * self.ts_date_transform_univ
def test_len(self) -> None:
# Normal case
self.assertEqual(len(self.ts_univ_1), self.length)
# Empty case
self.assertEqual(len(self.ts_empty), 0)
def test_empty(self) -> None:
# Empty case
self.assertTrue(self.ts_empty.is_empty())
# Not empty case
self.assertFalse(self.ts_univ_1.is_empty())
def test_extend(self) -> None:
# Testing cases with validate=True
# Univariate case
self.ts_univ_extend.extend(self.ts_date_transform_univ)
self.assertEqual(self.ts_univ_extend, self.ts_date_transform_concat_univ)
# Multivariate case
self.ts_multi_extend.extend(self.ts_date_transform_multi)
self.assertEqual(self.ts_multi_extend, self.ts_date_transform_concat_multi)
# Univariate and multivariate case
self.ts_multi_extend_2.extend(self.ts_date_transform_univ)
self.assertEqual(self.ts_multi_extend_2, self.ts_date_transform_concat_mixed)
# Empty case
self.ts_univ_default_names.extend(self.ts_empty)
self.assertEqual(self.ts_univ_default_names, self.ts_univ_default_names_2)
# Catching errors
with self.assertRaises(ValueError):
self.ts_univ_extend_err.extend(self.ts_date_transform_double_univ)
# Multivariate case
self.ts_multi_extend_err.extend(self.ts_date_transform_double_multi)
# Univariate and multivariate case
self.ts_multi_extend_err_2.extend(self.ts_date_transform_double_univ)
# Empty case
self.ts_empty_extend_err.extend(self.ts_empty)
# Testing cases with validate=False
# Univariate case
self.ts_univ_extend_2.extend(self.ts_date_transform_double_univ, validate=False)
self.assertEqual(
self.ts_univ_extend_2, self.ts_date_transform_concat_double_univ
)
# Multivariate case
self.ts_multi_extend_3.extend(
self.ts_date_transform_double_multi, validate=False
)
self.assertEqual(
self.ts_multi_extend_3, self.ts_date_transform_concat_double_multi
)
# Univariate and multivariate case
self.ts_multi_extend_4.extend(
self.ts_date_transform_double_univ, validate=False
)
self.assertEqual(
self.ts_multi_extend_4, self.ts_date_transform_concat_double_mixed
)
# Empty case
self.ts_empty_extend.extend(self.ts_empty, validate=False)
self.assertEqual(self.ts_empty_extend, self.ts_empty)
def test_get_item(self) -> None:
# Univariate test case
self.assertEqual(
self.ts_date_transform_concat_univ[: len(self.ts_univ_1)], self.ts_univ_1
)
# Multivariate test case
self.assertEqual(
self.ts_date_transform_concat_multi[: len(self.ts_multi_1)], self.ts_multi_1
)
# Multivariate test case where we select a specific column
for col in self.ts_date_transform_concat_multi.value.columns:
ts_univ = TimeSeriesData(
time=self.ts_date_transform_concat_multi.time,
value=self.ts_date_transform_concat_multi.value[col],
time_col_name=self.ts_date_transform_concat_multi.time_col_name,
)
self.assertEqual(self.ts_date_transform_concat_multi[col], ts_univ)
# Multivariate test case where we select multiple columns
self.assertEqual(
self.ts_date_transform_concat_multi[MULTIVAR_VALUE_DF_COLS],
self.ts_date_transform_concat_multi,
)
# Full/Empty cases
self.assertEqual(self.ts_univ_1[:], self.ts_univ_1)
self.assertEqual(
self.ts_univ_1[0:0],
TimeSeriesData(
time=pd.Series(name=TIME_COL_NAME),
value=pd.Series(name=VALUE_COL_NAME),
time_col_name=TIME_COL_NAME,
),
)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot(self) -> plt.Figure:
# Univariate test case
ax = self.ts_univ_1.plot(cols=["y"])
self.assertIsNotNone(ax)
return plt.gcf()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot_multivariate(self) -> plt.Figure:
# Multivariate test case
ax = self.ts_multi_1.plot()
self.assertIsNotNone(ax)
return plt.gcf()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot_params(self) -> plt.Figure:
# Test more parameter overrides.
ax = self.ts_multi_1.plot(
figsize=(8, 3), plot_kwargs={"cmap": "Purples"}, grid=False
)
self.assertIsNotNone(ax)
return plt.gcf()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `pytest.mark.mpl_image_compare`.
@pytest.mark.mpl_image_compare
def test_plot_grid_ax(self) -> plt.Figure:
# Test grid and ax parameter overrides.
fig, ax = plt.subplots(figsize=(6, 4))
ax = self.ts_univ_1.plot(ax=ax, grid_kwargs={"lw": 2, "ls": ":"})
self.assertIsNotNone(ax)
return fig
def test_plot_missing_column(self) -> None:
# Columns not in data.
with self.assertRaises(ValueError):
self.ts_univ_1.plot(cols=["z"])
def test_plot_empty(self) -> None:
# No data to plot.
with self.assertRaises(ValueError):
self.ts_empty.plot()
class TimeSeriesDataMiscTest(TimeSeriesBaseTest):
def setUp(self) -> None:
super(TimeSeriesDataMiscTest, self).setUp()
# Creating TimeSeriesData objects
# Univariate TimeSeriesData initialized from a pd.DataFrame
self.ts_univ = TimeSeriesData(df=self.AIR_DF, time_col_name=TIME_COL_NAME)
# Multivariate TimeSeriesData initialized from a pd.DataFrame
self.ts_multi = TimeSeriesData(
df=self.MULTIVAR_AIR_DF, time_col_name=TIME_COL_NAME
)
def test_is_univariate(self) -> None:
# Univariate case
self.assertTrue(self.ts_univ.is_univariate())
# Multivariate case
self.assertFalse(self.ts_multi.is_univariate())
def test_time_to_index(self) -> None:
# Univariate case
assert_index_equal(self.ts_univ.time_to_index(), self.AIR_TIME_DATETIME_INDEX)
# Multivariate case
assert_index_equal(self.ts_multi.time_to_index(), self.AIR_TIME_DATETIME_INDEX)
def test_repr(self) -> None:
# Univariate case
self.assertEqual(self.ts_univ.__repr__(), self.AIR_DF_DATETIME.__repr__())
# Multivariate case
self.assertEqual(
self.ts_multi.__repr__(), self.MULTIVAR_AIR_DF_DATETIME.__repr__()
)
def test_repr_html(self) -> None:
# Univariate case
self.assertEqual(self.ts_univ._repr_html_(), self.AIR_DF_DATETIME._repr_html_())
# Multivariate case
self.assertEqual(
self.ts_multi._repr_html_(), self.MULTIVAR_AIR_DF_DATETIME._repr_html_()
)
class TSIteratorTest(TestCase):
def test_ts_iterator_univariate_next(self) -> None:
df = pd.DataFrame(
[["2020-03-01", 100], ["2020-03-02", 120], ["2020-03-03", 130]],
columns=["time", "y"],
)
kats_data = TimeSeriesData(df=df)
kats_iterator = TSIterator(kats_data)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-01")]), check_names=False
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([100]), check_names=False
)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-02")]), check_names=False
)
assert_series_equal(
cast(pd.Series, val.value), pd.Series([120]), check_names=False
)
val = next(kats_iterator)
assert_series_equal(
val.time, pd.Series([pd.Timestamp("2020-03-03")]), check_names=False
)
assert_series_equal(
            cast(pd.Series, val.value), pd.Series([130]), check_names=False
        )
from fastapi import APIRouter, HTTPException
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import os
router = APIRouter()
DATA_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "..","data", "job_industry_data.csv")
@router.get('/jobs/{city_id}')
async def pop_to_dict(city_id: int):
"""
Job Industry insights
"""
rt_dict = {}
rt_data_dict = {}
df = pd.read_csv(DATA_FILEPATH, encoding='utf-8')
dataframe = df[df['city_id']==city_id]
rt_data = dataframe.to_numpy()
rt_data_dict['id'] = rt_data[0][1]
rt_data_dict['city'] = rt_data[0][2]
rt_data_dict['state'] = rt_data[0][3]
rt_data_dict['city_state'] = rt_data[0][4]
rt_data_dict["job_ranked_1"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[0]
rt_data_dict["job_ranked_1_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[0]
rt_data_dict["job_ranked_2"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[1]
rt_data_dict["job_ranked_2_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[1]
rt_data_dict["job_ranked_3"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[2]
rt_data_dict["job_ranked_3_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[2]
rt_data_dict["job_ranked_4"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[3]
rt_data_dict["job_ranked_4_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[3]
rt_data_dict["job_ranked_5"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[4]
rt_data_dict["job_ranked_5_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[4]
rt_data_dict["job_ranked_6"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[5]
rt_data_dict["job_ranked_6_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[5]
rt_data_dict["job_ranked_7"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[6]
rt_data_dict["job_ranked_7_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[6]
rt_data_dict["job_ranked_8"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[7]
rt_data_dict["job_ranked_8_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[7]
rt_data_dict["job_ranked_9"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[8]
rt_data_dict["job_ranked_9_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[8]
rt_data_dict["job_ranked_10"] = top_jobs(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[9]
rt_data_dict["job_ranked_10_%"] = percentage(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10).iloc[9]
rt_dict["data"] = rt_data_dict
rt_dict["viz"] = cityjobsviz(df = pd.read_csv(DATA_FILEPATH, encoding='utf-8'), city_id=rt_data[0][1], n_industries=10)
return rt_dict
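# Hedged usage sketch (kept as comments so nothing runs at import time): exercising this
# route with FastAPI's TestClient. The application module and the city_id are assumptions.
# from fastapi.testclient import TestClient
# from app.main import app  # hypothetical module that includes this router
# client = TestClient(app)
# response = client.get("/jobs/1")
# response.json()["data"]["job_ranked_1"]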
def cityjobsviz(df, city_id, n_industries=10):
    df = pd.read_csv(DATA_FILEPATH, encoding='utf-8')
import urllib.request
import json
from credentials import API_KEY
from stores import locations, station
BASE_URL = 'https://maps.googleapis.com/maps/api/distancematrix/json?key=%s&' % API_KEY
def lookup(locations):
grid = []
for i in range(len(locations)-1):
origin = '%s,%s' % locations[i]
for j in range(i+1, len(locations)):
print(i,j)
dest = '%s,%s' % locations[j]
url = (BASE_URL + 'origins={}&destinations={}&mode=driving&sensor=false').format(origin, dest)
seconds = 0
if True:
html = urllib.request.urlopen(url).read()
data = json.loads(html)
try:
seconds = data["rows"][0]["elements"][0]["duration"]["value"]
except:
print('failed to get time')
                    print(origin, dest)
raise
return
grid.append((i, j, seconds))
    table = pd.DataFrame(grid, columns=['from', 'to', 'weight'])
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:nomarker
# text_representation:
# extension: .py
# format_name: nomarker
# format_version: '1.0'
# jupytext_version: 1.10.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CNN - Example 01
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ### Load Keras Dataset
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# #### Visualize data
print(x_train.shape)
single_image = x_train[0]
print(single_image.shape)
plt.imshow(single_image)
# ### Pre-Process data
# #### One Hot encode
# Make it one hot encoded otherwise it will think as a regression problem on a continuous axis
from tensorflow.keras.utils import to_categorical
print("Shape before one hot encoding" +str(y_train.shape))
y_example = to_categorical(y_train)
print(y_example)
print("Shape after one hot encoding" +str(y_train.shape))
y_example[0]
y_cat_test = to_categorical(y_test,10)
y_cat_train = to_categorical(y_train,10)
# #### Normalize the images
x_train = x_train/255
x_test = x_test/255
scaled_single = x_train[0]
plt.imshow(scaled_single)
# #### Reshape the images
# Reshape to include channel dimension (in this case, 1 channel)
# x_train.shape
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000,28,28,1)
# ### Image data augmentation
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# help(ImageDataGenerator)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
datagen.fit(x_train)
it = datagen.flow(x_train, y_cat_train, batch_size=32)
# Preparing the Samples and Plot for displaying output
for i in range(9):
# preparing the subplot
plt.subplot(330 + 1 + i)
# generating images in batches
batch = it.next()
# Remember to convert these images to unsigned integers for viewing
image = batch[0][0].astype('uint8')
# Plotting the data
plt.imshow(image)
# Displaying the figure
plt.show()
# ### Model # 1
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(4,4), input_shape=(28, 28, 1), activation='relu',))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
# Note: if y is not one-hot encoded, use loss='sparse_categorical_crossentropy' instead
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy', 'categorical_accuracy'])
# we can add in additional metrics https://keras.io/metrics/
model.summary()
# #### Add Early Stopping
from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=2)
# ##### Training using one hot encoding
# fits the model on batches with real-time data augmentation:
history = model.fit(datagen.flow(x_train, y_cat_train, batch_size=32),
epochs=10,
steps_per_epoch=len(x_train) / 32,
validation_data=(x_test,y_cat_test),
callbacks=[early_stop])
# #### Save model
# Saving model
# from tensorflow.keras.models import load_model
# model_file = 'D:\\Sandbox\\Github\\MODELS\\' + '01_mnist.h5'
# model.save(model_file)
# #### Retreive model
# Retrieve model
# model = load_model(model_file)
# #### Evaluate
# Rule of thumb
# 1. High Bias accuracy = 80% val-accuracy = 78% (2% gap)
# 2. High Variance accuracy = 98% val-accuracy = 80% (18% gap)
# 3. High Bias and High Variance accuracy = 80% val-accuracy = 60% (20% gap)
# 4. Low Bias and Low Variance accuracy = 98% val-accuracy = 96% (2% gap)
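# A quick check of which regime this run falls into, per the rule of thumb above
# (a rough sketch; it assumes the 'accuracy'/'val_accuracy' keys created by the
# metrics list passed to compile() and the `history` object returned by fit()).
final_train_acc = history.history['accuracy'][-1]
final_val_acc = history.history['val_accuracy'][-1]
print(f"train acc: {final_train_acc:.3f} | val acc: {final_val_acc:.3f} | gap: {final_train_acc - final_val_acc:.1%}")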
# #### Eval - Train
model.metrics_names
pd.DataFrame(history.history).head()
#pd.DataFrame(model.history.history).head()
# pd.DataFrame(history.history).plot()
losses = pd.DataFrame(history.history)
import logging
import os
import re
import shutil
import sys
import warnings
from datetime import datetime
from typing import Union
import h5py
import numpy as np
import pandas as pd
from omegaconf import DictConfig, OmegaConf, ListConfig
from tqdm import tqdm
import deepethogram
from deepethogram.utils import get_subfiles, log
from deepethogram.zscore import zscore_video
from . import utils
from .file_io import read_labels, convert_video
log = logging.getLogger(__name__)
required_keys = ['project', 'augs']
projects_file_directory = os.path.dirname(os.path.abspath(__file__))
def initialize_project(directory: Union[str, os.PathLike],
project_name: str,
behaviors: list = None,
make_subdirectory: bool = True,
labeler: str = None):
"""Initializes a DeepEthogram project.
Copies the default configuration file and updates it with the directory, name, and behaviors specified.
Makes directories where project info, data, and models will live.
Args:
directory: str, os.PathLike
Directory where DeepEthogram data and models will be made / copied. Should be on an SSD. Should
also have plenty of space.
project_name: str
name of the deepethogram project
behaviors: optional list.
First should be background.
make_subdirectory: bool
if True, make a subdirectory like "/path/to/DATA/project_name_deepethogram"
if False, keep as the input directory: "/path/to/DATA"
Example:
        initialize_project('C:/DATA', 'grooming', ['background', 'face_groom', 'body_groom', 'rear'])
"""
assert os.path.isdir(directory), 'Directory does not exist: {}'.format(directory)
if behaviors is not None:
assert behaviors[0] == 'background'
root = os.path.dirname(os.path.abspath(__file__))
project_config = utils.load_yaml(os.path.join(root, 'conf', 'project', 'project_config_default.yaml'))
project_name = project_name.replace(' ', '_')
project_config['project']['name'] = project_name
project_config['project']['class_names'] = behaviors
if make_subdirectory:
project_dir = os.path.join(directory, '{}_deepethogram'.format(project_name))
else:
project_dir = directory
project_config['project']['path'] = project_dir
project_config['project']['data_path'] = 'DATA'
project_config['project']['model_path'] = 'models'
project_config['project']['labeler'] = labeler
if not os.path.isdir(project_config['project']['path']):
os.makedirs(project_config['project']['path'])
# os.chdir(project_config['project']['path'])
data_abs = os.path.join(project_config['project']['path'], project_config['project']['data_path'])
if not os.path.isdir(data_abs):
os.makedirs(data_abs)
model_abs = os.path.join(project_config['project']['path'], project_config['project']['model_path'])
if not os.path.isdir(model_abs):
os.makedirs(model_abs)
fname = os.path.join(project_dir, 'project_config.yaml')
project_config['project']['config_file'] = fname
utils.save_dict_to_yaml(project_config, fname)
return project_config
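# Hedged sketch of the resulting layout for initialize_project('/data', 'grooming', ...)
# with make_subdirectory=True (paths hypothetical):
#   /data/grooming_deepethogram/
#       project_config.yaml
#       DATA/
#       models/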
def add_video_to_project(project: dict, path_to_video: Union[str, os.PathLike], mode: str = 'copy') -> str:
"""
Adds a video file to a DEG project.
1. Copies the video file to the project's data directory
2. initializes a record.yaml file
3. Computes per-channel image statistics (for input normalization)
Parameters
----------
project: dict
pre-loaded configuration dictionary
path_to_video: str, PathLike
absolute path to a video file. Filetype must be acceptable to deepethogram.file_io.VideoReader
mode: str
if 'copy': copies files to new directory
if 'symlink': tries to make a symlink from the old location to the new location. NOT RECOMMENDED. if you delete
the video in its current location, the symlink will break, and we will have errors during training or
inference
if 'move': moves the file
Returns
-------
new_path: str
path to the video file after moving to the DEG project data directory.
"""
# assert (os.path.isdir(project_directory))
assert os.path.exists(path_to_video), 'video not found! {}'.format(path_to_video)
if os.path.isdir(path_to_video):
copy_func = shutil.copytree
elif os.path.isfile(path_to_video):
copy_func = shutil.copy
else:
raise ValueError('video does not exist: {}'.format(path_to_video))
assert mode in ['copy', 'symlink', 'move']
# project = utils.load_yaml(os.path.join(project_directory, 'project_config.yaml'))
# project = convert_config_paths_to_absolute(project)
log.debug('configuration file when adding video: {}'.format(project))
datadir = os.path.join(project['project']['path'], project['project']['data_path'])
assert os.path.isdir(datadir), 'data path not found: {}'.format(datadir)
# for speed during training, videos can be saved as directories of PNG / JPEG files.
if os.path.isdir(path_to_video):
video_is_directory = True
else:
video_is_directory = False
basename = os.path.basename(path_to_video)
vidname = os.path.splitext(basename)[0]
video_directory = os.path.join(datadir, vidname)
if os.path.isdir(video_directory):
raise ValueError('Directory {} already exists in your data dir! ' \
'Please rename the video to a unique name'.format(vidname))
os.makedirs(video_directory)
new_path = os.path.join(video_directory, basename)
if mode == 'copy':
if video_is_directory:
shutil.copytree(path_to_video, new_path)
else:
shutil.copy(path_to_video, new_path)
elif mode == 'symlink':
os.symlink(path_to_video, new_path)
elif mode == 'move':
shutil.move(path_to_video, new_path)
else:
raise ValueError('invalid argument to mode: {}'.format(mode))
record = parse_subdir(video_directory)
log.debug('New record after adding: {}'.format(record))
utils.save_dict_to_yaml(record, os.path.join(video_directory, 'record.yaml'))
zscore_video(os.path.join(video_directory, basename), project)
return new_path
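# Hedged usage sketch (comments only; paths are hypothetical). `utils.load_yaml` is the
# loader already imported by this module.
# project_cfg = utils.load_yaml('/path/to/myproject_deepethogram/project_config.yaml')
# new_path = add_video_to_project(project_cfg, '/path/to/mouse_day1.avi', mode='copy')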
def add_label_to_project(path_to_labels: Union[str, os.PathLike], path_to_video) -> str:
"""Adds an externally created label file to the project. Updates record"""
assert os.path.isfile(path_to_labels)
assert os.path.isfile(path_to_video)
assert is_deg_file(path_to_video)
viddir = os.path.dirname(path_to_video)
label_dst = os.path.join(viddir, os.path.basename(path_to_labels))
if os.path.isfile(label_dst):
warnings.warn('Label already exists in destination {}, overwriting...'.format(label_dst))
    df = pd.read_csv(path_to_labels, index_col=0)
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
import datetime
import decimal
import os
import random
from io import BytesIO
from string import ascii_lowercase
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.orc
import pyorc as po
import pytest
import cudf
from cudf.core.dtypes import Decimal64Dtype
from cudf.io.orc import ORCWriter
from cudf.testing._utils import (
assert_eq,
gen_rand_series,
supported_numpy_dtypes,
)
@pytest.fixture(scope="module")
def datadir(datadir):
return datadir / "orc"
@pytest.fixture
def path_or_buf(datadir):
fname = datadir / "TestOrcFile.test1.orc"
try:
with open(fname, "rb") as f:
buffer = BytesIO(f.read())
except Exception as excpr:
if type(excpr).__name__ == "FileNotFoundError":
pytest.skip(".parquet file is not found")
else:
print(type(excpr).__name__)
def _make_path_or_buf(src):
if src == "filepath":
return str(fname)
if src == "pathobj":
return fname
if src == "bytes_io":
return buffer
if src == "bytes":
return buffer.getvalue()
if src == "url":
return fname.as_uri()
raise ValueError("Invalid source type")
yield _make_path_or_buf
@pytest.mark.filterwarnings("ignore:Using CPU")
@pytest.mark.parametrize("engine", ["pyarrow", "cudf"])
@pytest.mark.parametrize("use_index", [False, True])
@pytest.mark.parametrize(
"inputfile, columns",
[
("TestOrcFile.emptyFile.orc", ["boolean1"]),
(
"TestOrcFile.test1.orc",
[
"boolean1",
"byte1",
"short1",
"int1",
"long1",
"float1",
"double1",
],
),
("TestOrcFile.RLEv2.orc", ["x", "y"]),
("TestOrcFile.testSnappy.orc", None),
("TestOrcFile.demo-12-zlib.orc", ["_col2", "_col3", "_col4", "_col5"]),
],
)
def test_orc_reader_basic(datadir, inputfile, columns, use_index, engine):
path = datadir / inputfile
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
expect = orcfile.read(columns=columns).to_pandas()
got = cudf.read_orc(
path, engine=engine, columns=columns, use_index=use_index
)
assert_eq(expect, got, check_categorical=False)
def test_orc_reader_filenotfound(tmpdir):
with pytest.raises(FileNotFoundError):
cudf.read_orc("TestMissingFile.orc")
with pytest.raises(FileNotFoundError):
cudf.read_orc(tmpdir.mkdir("cudf_orc"))
def test_orc_reader_local_filepath():
path = "~/TestLocalFile.orc"
if not os.path.isfile(path):
pytest.skip("Local .orc file is not found")
cudf.read_orc(path)
@pytest.mark.parametrize(
"src", ["filepath", "pathobj", "bytes_io", "bytes", "url"]
)
def test_orc_reader_filepath_or_buffer(path_or_buf, src):
cols = ["int1", "long1", "float1", "double1"]
orcfile = pa.orc.ORCFile(path_or_buf("filepath"))
expect = orcfile.read(columns=cols).to_pandas()
got = cudf.read_orc(path_or_buf(src), columns=cols)
assert_eq(expect, got)
def test_orc_reader_trailing_nulls(datadir):
path = datadir / "TestOrcFile.nulls-at-end-snappy.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
expect = orcfile.read().to_pandas().fillna(0)
got = cudf.read_orc(path).fillna(0)
# PANDAS uses NaN to represent invalid data, which forces float dtype
# For comparison, we can replace NaN with 0 and cast to the cuDF dtype
for col in expect.columns:
expect[col] = expect[col].astype(got[col].dtype)
assert_eq(expect, got, check_categorical=False)
@pytest.mark.parametrize("use_index", [False, True])
@pytest.mark.parametrize(
"inputfile",
["TestOrcFile.testDate1900.orc", "TestOrcFile.testDate2038.orc"],
)
def test_orc_reader_datetimestamp(datadir, inputfile, use_index):
path = datadir / inputfile
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas(date_as_object=False)
gdf = cudf.read_orc(path, use_index=use_index)
assert_eq(pdf, gdf, check_categorical=False)
def test_orc_reader_strings(datadir):
path = datadir / "TestOrcFile.testStringAndBinaryStatistics.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
expect = orcfile.read(columns=["string1"])
got = cudf.read_orc(path, columns=["string1"])
assert_eq(expect, got, check_categorical=False)
def test_orc_read_statistics(datadir):
# Read in file containing 2 columns ("int1" and "string1") and 3 stripes
# (sizes 5000, 5000 and 1000 respectively). Each stripe has the same value
# in every one of its rows. The values the stripes have are 1, 2, and 3 in
# "int1" and "one", "two", and "three" in "string1".
path = datadir / "TestOrcFile.testStripeLevelStats.orc"
try:
(
file_statistics,
stripes_statistics,
) = cudf.io.orc.read_orc_statistics([path, path])
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
# Check numberOfValues
assert_eq(file_statistics[0]["int1"]["number_of_values"], 11_000)
assert_eq(
file_statistics[0]["int1"]["number_of_values"],
sum(
[
stripes_statistics[0]["int1"]["number_of_values"],
stripes_statistics[1]["int1"]["number_of_values"],
stripes_statistics[2]["int1"]["number_of_values"],
]
),
)
assert_eq(
stripes_statistics[1]["int1"]["number_of_values"],
stripes_statistics[1]["string1"]["number_of_values"],
)
assert_eq(stripes_statistics[2]["string1"]["number_of_values"], 1_000)
# Check other statistics
assert_eq(stripes_statistics[2]["string1"]["has_null"], False)
assert_eq(
file_statistics[0]["int1"]["minimum"],
min(
stripes_statistics[0]["int1"]["minimum"],
stripes_statistics[1]["int1"]["minimum"],
stripes_statistics[2]["int1"]["minimum"],
),
)
assert_eq(file_statistics[0]["int1"]["minimum"], 1)
assert_eq(file_statistics[0]["string1"]["minimum"], "one")
@pytest.mark.filterwarnings("ignore:Using CPU")
@pytest.mark.parametrize("engine", ["cudf", "pyarrow"])
@pytest.mark.parametrize(
"predicate,expected_len",
[
([[("int1", "==", 1)]], 5000),
([[("int1", "<=", 2)]], 10000),
([[("int1", "==", -1)]], 0),
([[("int1", "in", range(3))]], 10000),
([[("int1", "in", {1, 3})]], 6000),
([[("int1", "not in", {1, 3})]], 5000),
],
)
def test_orc_read_filtered(datadir, engine, predicate, expected_len):
path = datadir / "TestOrcFile.testStripeLevelStats.orc"
try:
df_filtered = cudf.read_orc(path, engine=engine, filters=predicate)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
# Assert # of rows after filtering
assert len(df_filtered) == expected_len
@pytest.mark.filterwarnings("ignore:Using CPU")
@pytest.mark.parametrize("engine", ["cudf", "pyarrow"])
def test_orc_read_stripes(datadir, engine):
path = datadir / "TestOrcFile.testDate1900.orc"
try:
pdf = cudf.read_orc(path, engine=engine)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
num_rows, stripes, col_names = cudf.io.read_orc_metadata(path)
# Read stripes one at a time
gdf = [
cudf.read_orc(path, engine=engine, stripes=[[i]])
for i in range(stripes)
]
gdf = cudf.concat(gdf).reset_index(drop=True)
assert_eq(pdf, gdf, check_categorical=False, check_index_type=True)
# Read stripes all at once
gdf = cudf.read_orc(
path, engine=engine, stripes=[[int(x) for x in range(stripes)]]
)
assert_eq(pdf, gdf, check_categorical=False)
# Read only some stripes
gdf = cudf.read_orc(path, engine=engine, stripes=[[0, 1]])
assert_eq(gdf, pdf.head(25000))
gdf = cudf.read_orc(path, engine=engine, stripes=[[0, stripes - 1]])
assert_eq(
gdf,
cudf.concat([pdf.head(15000), pdf.tail(10000)], ignore_index=True),
check_index_type=True,
)
@pytest.mark.parametrize("num_rows", [1, 100, 3000])
@pytest.mark.parametrize("skiprows", [0, 1, 3000])
def test_orc_read_rows(datadir, skiprows, num_rows):
path = datadir / "TestOrcFile.decimal.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(path, skiprows=skiprows, num_rows=num_rows)
# Slice rows out of the whole dataframe for comparison as PyArrow doesn't
# have an API to read a subsection of rows from the file
pdf = pdf[skiprows : skiprows + num_rows]
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
def test_orc_read_skiprows(tmpdir):
buff = BytesIO()
df = pd.DataFrame(
{"a": [1, 0, 1, 0, None, 1, 1, 1, 0, None, 0, 0, 1, 1, 1, 1]},
dtype=pd.BooleanDtype(),
)
writer = po.Writer(buff, po.Struct(a=po.Boolean()))
tuples = list(
map(
lambda x: (None,) if x[0] is pd.NA else x,
list(df.itertuples(index=False, name=None)),
)
)
writer.writerows(tuples)
writer.close()
skiprows = 10
expected = cudf.read_orc(buff)[skiprows::].reset_index(drop=True)
got = cudf.read_orc(buff, skiprows=skiprows)
assert_eq(expected, got)
def test_orc_reader_uncompressed_block(datadir):
path = datadir / "uncompressed_snappy.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
expect = orcfile.read().to_pandas()
got = cudf.read_orc(path)
assert_eq(expect, got, check_categorical=False)
def test_orc_reader_nodata_block(datadir):
path = datadir / "nodata.orc"
try:
orcfile = pa.orc.ORCFile(path)
except Exception as excpr:
if type(excpr).__name__ == "ArrowIOError":
pytest.skip(".orc file is not found")
else:
print(type(excpr).__name__)
expect = orcfile.read().to_pandas()
got = cudf.read_orc(path, num_rows=1)
assert_eq(expect, got, check_categorical=False)
@pytest.mark.parametrize("compression", [None, "snappy"])
@pytest.mark.parametrize(
"reference_file, columns",
[
(
"TestOrcFile.test1.orc",
[
"boolean1",
"byte1",
"short1",
"int1",
"long1",
"float1",
"double1",
],
),
("TestOrcFile.demo-12-zlib.orc", ["_col1", "_col3", "_col5"]),
],
)
def test_orc_writer(datadir, tmpdir, reference_file, columns, compression):
pdf_fname = datadir / reference_file
gdf_fname = tmpdir.join("gdf.orc")
try:
orcfile = pa.orc.ORCFile(pdf_fname)
except Exception as excpr:
if type(excpr).__name__ == "ArrowIOError":
pytest.skip(".orc file is not found")
else:
print(type(excpr).__name__)
expect = orcfile.read(columns=columns).to_pandas()
cudf.from_pandas(expect).to_orc(gdf_fname.strpath, compression=compression)
got = pa.orc.ORCFile(gdf_fname).read(columns=columns).to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("compression", [None, "snappy"])
@pytest.mark.parametrize(
"reference_file, columns",
[
(
"TestOrcFile.test1.orc",
[
"boolean1",
"byte1",
"short1",
"int1",
"long1",
"float1",
"double1",
],
),
("TestOrcFile.demo-12-zlib.orc", ["_col1", "_col3", "_col5"]),
],
)
def test_chunked_orc_writer(
datadir, tmpdir, reference_file, columns, compression
):
pdf_fname = datadir / reference_file
gdf_fname = tmpdir.join("chunked_gdf.orc")
try:
orcfile = pa.orc.ORCFile(pdf_fname)
except Exception as excpr:
if type(excpr).__name__ == "ArrowIOError":
pytest.skip(".orc file is not found")
else:
print(type(excpr).__name__)
pdf = orcfile.read(columns=columns).to_pandas()
gdf = cudf.from_pandas(pdf)
expect = pd.concat([pdf, pdf]).reset_index(drop=True)
writer = ORCWriter(gdf_fname, compression=compression)
writer.write_table(gdf)
writer.write_table(gdf)
writer.close()
got = pa.orc.ORCFile(gdf_fname).read(columns=columns).to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize(
"dtypes",
[
{"c": str, "a": int},
{"c": int, "a": str},
{"c": int, "a": str, "b": float},
{"c": str, "a": object},
],
)
def test_orc_writer_strings(tmpdir, dtypes):
gdf_fname = tmpdir.join("gdf_strings.orc")
expect = cudf.datasets.randomdata(nrows=10, dtypes=dtypes, seed=1)
expect.to_orc(gdf_fname)
got = pa.orc.ORCFile(gdf_fname).read().to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize(
"dtypes",
[
{"c": str, "a": int},
{"c": int, "a": str},
{"c": int, "a": str, "b": float},
{"c": str, "a": object},
],
)
def test_chunked_orc_writer_strings(tmpdir, dtypes):
gdf_fname = tmpdir.join("chunked_gdf_strings.orc")
gdf = cudf.datasets.randomdata(nrows=10, dtypes=dtypes, seed=1)
pdf = gdf.to_pandas()
expect = pd.concat([pdf, pdf]).reset_index(drop=True)
writer = ORCWriter(gdf_fname)
writer.write_table(gdf)
writer.write_table(gdf)
writer.close()
got = pa.orc.ORCFile(gdf_fname).read().to_pandas()
assert_eq(expect, got)
def test_orc_writer_sliced(tmpdir):
cudf_path = tmpdir.join("cudf.orc")
df = pd.DataFrame()
df["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"])
df = cudf.from_pandas(df)
df_select = df.iloc[1:3]
df_select.to_orc(cudf_path)
assert_eq(cudf.read_orc(cudf_path), df_select.reset_index(drop=True))
@pytest.mark.parametrize(
"orc_file",
[
"TestOrcFile.decimal.orc",
"TestOrcFile.decimal.same.values.orc",
"TestOrcFile.decimal.multiple.values.orc",
# For additional information take look at PR 7034
"TestOrcFile.decimal.runpos.issue.orc",
],
)
def test_orc_reader_decimal_type(datadir, orc_file):
file_path = datadir / orc_file
try:
orcfile = pa.orc.ORCFile(file_path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
df = cudf.read_orc(file_path).to_pandas()
assert_eq(pdf, df)
def test_orc_decimal_precision_fail(datadir):
file_path = datadir / "TestOrcFile.int_decimal.precision_19.orc"
try:
orcfile = pa.orc.ORCFile(file_path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
# Max precision supported is 18 (Decimal64Dtype limit)
# and the data has the precision 19. This test should be removed
# once Decimal128Dtype is introduced.
with pytest.raises(RuntimeError):
cudf.read_orc(file_path)
# Shouldn't cause failure if decimal column is not chosen to be read.
pdf = orcfile.read(columns=["int"]).to_pandas()
gdf = cudf.read_orc(file_path, columns=["int"])
assert_eq(pdf, gdf)
# For additional information take look at PR 6636 and 6702
@pytest.mark.parametrize(
"orc_file",
[
"TestOrcFile.boolean_corruption_PR_6636.orc",
"TestOrcFile.boolean_corruption_PR_6702.orc",
],
)
def test_orc_reader_boolean_type(datadir, orc_file):
file_path = datadir / orc_file
pdf = pd.read_orc(file_path)
df = cudf.read_orc(file_path).to_pandas()
assert_eq(pdf, df)
def test_orc_reader_tzif_timestamps(datadir):
# Contains timstamps in the range covered by the TZif file
# Other timedate tests only cover "future" times
path = datadir / "TestOrcFile.lima_timezone.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(path).to_pandas()
assert_eq(pdf, gdf)
def test_int_overflow(tmpdir):
file_path = tmpdir.join("gdf_overflow.orc")
# The number of rows and the large element trigger delta encoding
num_rows = 513
df = cudf.DataFrame({"a": [None] * num_rows}, dtype="int32")
df["a"][0] = 1024 * 1024 * 1024
df["a"][num_rows - 1] = 1
df.to_orc(file_path)
assert_eq(cudf.read_orc(file_path), df)
def normalized_equals(value1, value2):
if isinstance(value1, (datetime.datetime, np.datetime64)):
value1 = np.datetime64(value1, "ms")
if isinstance(value2, (datetime.datetime, np.datetime64)):
value2 = np.datetime64(value2, "ms")
# Compare integers with floats now
if isinstance(value1, float) or isinstance(value2, float):
return np.isclose(value1, value2)
return value1 == value2
@pytest.mark.parametrize("nrows", [1, 100, 6000000])
def test_orc_write_statistics(tmpdir, datadir, nrows):
supported_stat_types = supported_numpy_dtypes + ["str"]
# Can't write random bool columns until issue #6763 is fixed
if nrows == 6000000:
supported_stat_types.remove("bool")
# Make a dataframe
gdf = cudf.DataFrame(
{
"col_" + str(dtype): gen_rand_series(dtype, nrows, has_nulls=True)
for dtype in supported_stat_types
}
)
fname = tmpdir.join("gdf.orc")
# Write said dataframe to ORC with cuDF
gdf.to_orc(fname.strpath)
# Read back written ORC's statistics
orc_file = pa.orc.ORCFile(fname)
(file_stats, stripes_stats,) = cudf.io.orc.read_orc_statistics([fname])
# check file stats
for col in gdf:
if "minimum" in file_stats[0][col]:
stats_min = file_stats[0][col]["minimum"]
actual_min = gdf[col].min()
assert normalized_equals(actual_min, stats_min)
if "maximum" in file_stats[0][col]:
stats_max = file_stats[0][col]["maximum"]
actual_max = gdf[col].max()
assert normalized_equals(actual_max, stats_max)
if "number_of_values" in file_stats[0][col]:
stats_num_vals = file_stats[0][col]["number_of_values"]
actual_num_vals = gdf[col].count()
assert stats_num_vals == actual_num_vals
# compare stripe statistics with actual min/max
for stripe_idx in range(0, orc_file.nstripes):
stripe = orc_file.read_stripe(stripe_idx)
# pandas is unable to handle min/max of string col with nulls
stripe_df = cudf.DataFrame(stripe.to_pandas())
for col in stripe_df:
if "minimum" in stripes_stats[stripe_idx][col]:
actual_min = stripe_df[col].min()
stats_min = stripes_stats[stripe_idx][col]["minimum"]
assert normalized_equals(actual_min, stats_min)
if "maximum" in stripes_stats[stripe_idx][col]:
actual_max = stripe_df[col].max()
stats_max = stripes_stats[stripe_idx][col]["maximum"]
assert normalized_equals(actual_max, stats_max)
if "number_of_values" in stripes_stats[stripe_idx][col]:
stats_num_vals = stripes_stats[stripe_idx][col][
"number_of_values"
]
actual_num_vals = stripe_df[col].count()
assert stats_num_vals == actual_num_vals
@pytest.mark.parametrize("nrows", [1, 100, 6000000])
def test_orc_write_bool_statistics(tmpdir, datadir, nrows):
# Make a dataframe
gdf = cudf.DataFrame({"col_bool": gen_rand_series("bool", nrows)})
fname = tmpdir.join("gdf.orc")
# Write said dataframe to ORC with cuDF
gdf.to_orc(fname.strpath)
# Read back written ORC's statistics
orc_file = pa.orc.ORCFile(fname)
(file_stats, stripes_stats,) = cudf.io.orc.read_orc_statistics([fname])
# check file stats
col = "col_bool"
if "true_count" in file_stats[0][col]:
stats_true_count = file_stats[0][col]["true_count"]
actual_true_count = gdf[col].sum()
assert normalized_equals(actual_true_count, stats_true_count)
if "number_of_values" in file_stats[0][col]:
stats_valid_count = file_stats[0][col]["number_of_values"]
actual_valid_count = gdf[col].valid_count
assert normalized_equals(actual_valid_count, stats_valid_count)
# compare stripe statistics with actual min/max
for stripe_idx in range(0, orc_file.nstripes):
stripe = orc_file.read_stripe(stripe_idx)
# pandas is unable to handle min/max of string col with nulls
stripe_df = cudf.DataFrame(stripe.to_pandas())
if "true_count" in stripes_stats[stripe_idx][col]:
actual_true_count = stripe_df[col].sum()
stats_true_count = stripes_stats[stripe_idx][col]["true_count"]
assert normalized_equals(actual_true_count, stats_true_count)
if "number_of_values" in stripes_stats[stripe_idx][col]:
actual_valid_count = stripe_df[col].valid_count
stats_valid_count = stripes_stats[stripe_idx][col][
"number_of_values"
]
assert normalized_equals(actual_valid_count, stats_valid_count)
def test_orc_reader_gmt_timestamps(datadir):
path = datadir / "TestOrcFile.gmt.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(path).to_pandas()
assert_eq(pdf, gdf)
def test_orc_bool_encode_fail():
np.random.seed(0)
buffer = BytesIO()
# Generate a boolean column longer than a single stripe
fail_df = cudf.DataFrame({"col": gen_rand_series("bool", 600000)})
# Invalidate the first row in the second stripe to break encoding
fail_df["col"][500000] = None
# Should throw instead of generating a file that is incompatible
# with other readers (see issue #6763)
with pytest.raises(RuntimeError):
fail_df.to_orc(buffer)
# Generate a boolean column that fits into a single stripe
okay_df = cudf.DataFrame({"col": gen_rand_series("bool", 500000)})
okay_df["col"][500000 - 1] = None
# Invalid row is in the last row group of the stripe;
# encoding is assumed to be correct
okay_df.to_orc(buffer)
# Also validate data
pdf = pa.orc.ORCFile(buffer).read().to_pandas()
assert_eq(okay_df, pdf)
def test_nanoseconds_overflow():
buffer = BytesIO()
# Use nanosecond values that take more than 32 bits to encode
s = cudf.Series([710424008, -1338482640], dtype="datetime64[ns]")
expected = cudf.DataFrame({"s": s})
expected.to_orc(buffer)
cudf_got = cudf.read_orc(buffer)
assert_eq(expected, cudf_got)
pyarrow_got = pa.orc.ORCFile(buffer).read()
assert_eq(expected.to_pandas(), pyarrow_got.to_pandas())
def test_empty_dataframe():
buffer = BytesIO()
expected = cudf.DataFrame()
expected.to_orc(buffer)
# Raise error if column name is mentioned, but it doesn't exist.
with pytest.raises(RuntimeError):
cudf.read_orc(buffer, columns=["a"])
got_df = cudf.read_orc(buffer)
expected_pdf = pd.read_orc(buffer)
assert_eq(expected, got_df)
assert_eq(expected_pdf, got_df)
@pytest.mark.parametrize(
"data", [[None, ""], ["", None], [None, None], ["", ""]]
)
def test_empty_string_columns(data):
buffer = BytesIO()
expected = cudf.DataFrame({"string": data}, dtype="str")
expected.to_orc(buffer)
expected_pdf = pd.read_orc(buffer)
got_df = cudf.read_orc(buffer)
assert_eq(expected, got_df)
assert_eq(expected_pdf, got_df)
@pytest.mark.parametrize("scale", [-3, 0, 3])
def test_orc_writer_decimal(tmpdir, scale):
np.random.seed(0)
fname = tmpdir / "decimal.orc"
expected = cudf.DataFrame({"dec_val": gen_rand_series("i", 100)})
expected["dec_val"] = expected["dec_val"].astype(Decimal64Dtype(7, scale))
expected.to_orc(fname)
got = pd.read_orc(fname)
assert_eq(expected.to_pandas()["dec_val"], got["dec_val"])
@pytest.mark.parametrize("num_rows", [1, 100, 3000])
def test_orc_reader_multiple_files(datadir, num_rows):
path = datadir / "TestOrcFile.testSnappy.orc"
df_1 = pd.read_orc(path)
df_2 = pd.read_orc(path)
df = pd.concat([df_1, df_2], ignore_index=True)
gdf = cudf.read_orc([path, path], num_rows=num_rows).to_pandas()
# Slice rows out of the whole dataframe for comparison as PyArrow doesn't
# have an API to read a subsection of rows from the file
df = df[:num_rows]
df = df.reset_index(drop=True)
assert_eq(df, gdf)
def test_orc_reader_multi_file_single_stripe(datadir):
path = datadir / "TestOrcFile.testSnappy.orc"
# should raise an exception
with pytest.raises(ValueError):
cudf.read_orc([path, path], stripes=[0])
def test_orc_reader_multi_file_multi_stripe(datadir):
path = datadir / "TestOrcFile.testStripeLevelStats.orc"
gdf = cudf.read_orc([path, path], stripes=[[0, 1], [2]])
pdf = pd.read_orc(path)
assert_eq(pdf, gdf)
def test_orc_string_stream_offset_issue():
size = 30000
vals = {
str(x): [decimal.Decimal(1)] * size if x != 0 else ["XYZ"] * size
for x in range(0, 5)
}
df = cudf.DataFrame(vals)
buffer = BytesIO()
df.to_orc(buffer)
assert_eq(df, cudf.read_orc(buffer))
# Data is generated using pyorc module
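# Builds an ORC buffer with deeply nested list and struct columns (lists of
# lists, structs of structs, lists nesting structs and vice versa). The small
# stripe_size forces the data across many stripes, which exercises the nested
# type handling of the cuDF reader.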
def generate_list_struct_buff(size=100_000):
rd = random.Random(1)
np.random.seed(seed=1)
buff = BytesIO()
schema = {
"lvl3_list": po.Array(po.Array(po.Array(po.BigInt()))),
"lvl1_list": po.Array(po.BigInt()),
"lvl1_struct": po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}),
"lvl2_struct": po.Struct(
**{
"a": po.BigInt(),
"lvl1_struct": po.Struct(
**{"c": po.BigInt(), "d": po.BigInt()}
),
}
),
"list_nests_struct": po.Array(
po.Array(po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}))
),
"struct_nests_list": po.Struct(
**{
"struct": po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}),
"list": po.Array(po.BigInt()),
}
),
}
schema = po.Struct(**schema)
lvl3_list = [
rd.choice(
[
None,
[
[
[
rd.choice([None, np.random.randint(1, 3)])
for z in range(np.random.randint(1, 3))
]
for z in range(np.random.randint(0, 3))
]
for y in range(np.random.randint(0, 3))
],
]
)
for x in range(size)
]
lvl1_list = [
[
rd.choice([None, np.random.randint(0, 3)])
for y in range(np.random.randint(1, 4))
]
for x in range(size)
]
lvl1_struct = [
rd.choice([None, (np.random.randint(0, 3), np.random.randint(0, 3))])
for x in range(size)
]
lvl2_struct = [
rd.choice(
[
None,
(
rd.choice([None, np.random.randint(0, 3)]),
(
rd.choice([None, np.random.randint(0, 3)]),
np.random.randint(0, 3),
),
),
]
)
for x in range(size)
]
list_nests_struct = [
[
[rd.choice(lvl1_struct), rd.choice(lvl1_struct)]
for y in range(np.random.randint(1, 4))
]
for x in range(size)
]
struct_nests_list = [(lvl1_struct[x], lvl1_list[x]) for x in range(size)]
df = pd.DataFrame(
{
"lvl3_list": lvl3_list,
"lvl1_list": lvl1_list,
"lvl1_struct": lvl1_struct,
"lvl2_struct": lvl2_struct,
"list_nests_struct": list_nests_struct,
"struct_nests_list": struct_nests_list,
}
)
writer = po.Writer(buff, schema, stripe_size=1024)
tuples = list(
map(
lambda x: (None,) if x[0] is pd.NA else x,
list(df.itertuples(index=False, name=None)),
)
)
writer.writerows(tuples)
writer.close()
return buff
@pytest.fixture(scope="module")
def list_struct_buff():
return generate_list_struct_buff()
@pytest.mark.parametrize(
"columns",
[
None,
["lvl3_list", "list_nests_struct", "lvl2_struct", "struct_nests_list"],
["lvl2_struct", "lvl1_struct"],
],
)
@pytest.mark.parametrize("num_rows", [0, 15, 1005, 10561, 100_000])
@pytest.mark.parametrize("use_index", [True, False])
def test_lists_struct_nests(columns, num_rows, use_index, list_struct_buff):
gdf = cudf.read_orc(
list_struct_buff,
columns=columns,
num_rows=num_rows,
use_index=use_index,
)
pyarrow_tbl = pyarrow.orc.ORCFile(list_struct_buff).read()
pyarrow_tbl = (
pyarrow_tbl[:num_rows]
if columns is None
else pyarrow_tbl.select(columns)[:num_rows]
)
if num_rows > 0:
assert pyarrow_tbl.equals(gdf.to_arrow())
else:
assert_eq(pyarrow_tbl.to_pandas(), gdf)
@pytest.mark.parametrize("columns", [None, ["lvl1_struct"], ["lvl1_list"]])
def test_skip_rows_for_nested_types(columns, list_struct_buff):
with pytest.raises(
RuntimeError, match="skip_rows is not supported by nested column"
):
cudf.read_orc(
list_struct_buff, columns=columns, use_index=True, skiprows=5,
)
def test_pyspark_struct(datadir):
path = datadir / "TestOrcFile.testPySparkStruct.orc"
pdf = pa.orc.ORCFile(path).read().to_pandas()
gdf = cudf.read_orc(path)
assert_eq(pdf, gdf)
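# Builds an uncompressed ORC buffer with pyorc containing map columns: a plain
# string->int map, a map whose values are lists, and a map whose values are
# structs. Keys and values are randomly set to None to cover null handling.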
def gen_map_buff(size=10000):
from string import ascii_letters as al
rd = random.Random(1)
np.random.seed(seed=1)
buff = BytesIO()
schema = {
"lvl1_map": po.Map(key=po.String(), value=po.BigInt()),
"lvl2_map": po.Map(key=po.String(), value=po.Array(po.BigInt())),
"lvl2_struct_map": po.Map(
key=po.String(),
value=po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}),
),
}
schema = po.Struct(**schema)
lvl1_map = [
rd.choice(
[
None,
[
(
rd.choice(al),
rd.choice([None, np.random.randint(1, 1500)]),
)
for y in range(2)
],
]
)
for x in range(size)
]
lvl2_map = [
rd.choice(
[
None,
[
(
rd.choice(al),
rd.choice(
[
None,
[
rd.choice(
[None, np.random.randint(1, 1500)]
)
for z in range(5)
],
]
),
)
for y in range(2)
],
]
)
for x in range(size)
]
lvl2_struct_map = [
rd.choice(
[
None,
[
(
rd.choice(al),
rd.choice(
[
None,
(
rd.choice(
[None, np.random.randint(1, 1500)]
),
rd.choice(
[None, np.random.randint(1, 1500)]
),
),
]
),
)
for y in range(2)
],
]
)
for x in range(size)
]
pdf = pd.DataFrame(
{
"lvl1_map": lvl1_map,
"lvl2_map": lvl2_map,
"lvl2_struct_map": lvl2_struct_map,
}
)
writer = po.Writer(
buff, schema, stripe_size=1024, compression=po.CompressionKind.NONE
)
tuples = list(
map(
lambda x: (None,) if x[0] is pd.NA else x,
list(pdf.itertuples(index=False, name=None)),
)
)
writer.writerows(tuples)
writer.close()
return buff
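# Module-level buffer shared by the map-type read tests below.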
map_buff = gen_map_buff(size=100000)
@pytest.mark.parametrize(
"columns",
[None, ["lvl1_map", "lvl2_struct_map"], ["lvl2_struct_map", "lvl2_map"]],
)
@pytest.mark.parametrize("num_rows", [0, 15, 1005, 10561, 100000])
@pytest.mark.parametrize("use_index", [True, False])
def test_map_type_read(columns, num_rows, use_index):
tbl = pa.orc.ORCFile(map_buff).read()
lvl1_map = (
tbl["lvl1_map"]
.combine_chunks()
.view(pa.list_(pa.struct({"key": pa.string(), "value": pa.int64()})))
)
lvl2_map = (
tbl["lvl2_map"]
.combine_chunks()
.view(
pa.list_(
pa.struct({"key": pa.string(), "value": pa.list_(pa.int64())})
)
)
)
lvl2_struct_map = (
tbl["lvl2_struct_map"]
.combine_chunks()
.view(
pa.list_(
pa.struct(
{
"key": pa.string(),
"value": pa.struct({"a": pa.int64(), "b": pa.int64()}),
}
)
)
)
)
expected_tbl = pa.table(
{
"lvl1_map": lvl1_map,
"lvl2_map": lvl2_map,
"lvl2_struct_map": lvl2_struct_map,
}
)
gdf = cudf.read_orc(
map_buff, columns=columns, num_rows=num_rows, use_index=use_index
)
expected_tbl = (
expected_tbl[:num_rows]
if columns is None
else expected_tbl.select(columns)[:num_rows]
)
if num_rows > 0:
assert expected_tbl.equals(gdf.to_arrow())
else:
assert_eq(expected_tbl.to_pandas(), gdf)
@pytest.mark.parametrize(
"data", [["_col0"], ["FakeName", "_col0", "TerriblyFakeColumnName"]]
)
def test_orc_reader_decimal(datadir, data):
path = datadir / "TestOrcFile.decimal.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(path, decimal_cols_as_float=data).to_pandas()
    # Convert the decimal columns from PyArrow to float64 for comparison,
    # since cuDF returns them as float64 when decimal_cols_as_float is set
pdf = pdf.apply(pd.to_numeric)
assert_eq(pdf, gdf)
@pytest.mark.parametrize("data", [["InvalidColumnName"]])
def test_orc_reader_decimal_invalid_column(datadir, data):
path = datadir / "TestOrcFile.decimal.orc"
try:
orcfile = pa.orc.ORCFile(path)
except pa.ArrowIOError as e:
pytest.skip(".orc file is not found: %s" % e)
pdf = orcfile.read().to_pandas()
gdf = cudf.read_orc(path, decimal_cols_as_float=data).to_pandas()
    # Since the column name passed to `decimal_cols_as_float` is invalid,
    # the column should remain a decimal
assert_eq(pdf, gdf)
# This test case validates the issue raised in #8665,
# please check the issue for more details.
def test_orc_timestamp_read(datadir):
path = datadir / "TestOrcFile.timestamp.issue.orc"
pdf = pd.read_orc(path)
gdf = cudf.read_orc(path)
assert_eq(pdf, gdf)
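# Build an exact Decimal from a number via its string representation to avoid
# binary floating-point artifacts.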
def dec(num):
return decimal.Decimal(str(num))
@pytest.mark.parametrize(
"data",
[
# basic + nested strings
{
"lls": [[["a"], ["bb"]] * 5 for i in range(12345)],
"lls2": [[["ccc", "dddd"]] * 6 for i in range(12345)],
"ls_dict": [["X"] * 7 for i in range(12345)],
"ls_direct": [[str(i)] * 9 for i in range(12345)],
"li": [[i] * 11 for i in range(12345)],
"lf": [[i * 0.5] * 13 for i in range(12345)],
"ld": [[dec(i / 2)] * 15 for i in range(12345)],
},
# with nulls
{
"ls": [
[str(i) if i % 5 else None, str(2 * i)] if i % 2 else None
for i in range(12345)
],
"li": [[i, i * i, i % 2] if i % 3 else None for i in range(12345)],
"ld": [
[dec(i), dec(i / 2) if i % 7 else None] if i % 5 else None
for i in range(12345)
],
},
# with empty elements
{
"ls": [
[str(i), str(2 * i)] if i % 2 else [] for i in range(12345)
],
"lls": [
[[str(i), str(2 * i)]] if i % 2 else [[], []]
for i in range(12345)
],
"li": [[i, i * i, i % 2] if i % 3 else [] for i in range(12345)],
"lli": [
[[i], [i * i], [i % 2]] if i % 3 else [[]]
for i in range(12345)
],
"ld": [
[dec(i), dec(i / 2)] if i % 5 else [] for i in range(12345)
],
},
# variable list lengths
{
"ls": [[str(i)] * i for i in range(123)],
"li": [[i, i * i] * i for i in range(123)],
"ld": [[dec(i), dec(i / 2)] * i for i in range(123)],
},
        # many child elements (more than max_stripe_rows)
{"li": [[i] * 1100 for i in range(11000)]},
],
)
def test_orc_writer_lists(data):
pdf_in = pd.DataFrame(data)
buffer = BytesIO()
cudf.from_pandas(pdf_in).to_orc(
buffer, stripe_size_rows=2048, row_index_stride=512
)
pdf_out = pa.orc.ORCFile(buffer).read().to_pandas()
assert_eq(pdf_out, pdf_in)
def test_chunked_orc_writer_lists():
num_rows = 12345
pdf_in = pd.DataFrame(
{
"ls": [[str(i), str(2 * i)] for i in range(num_rows)],
"ld": [[dec(i / 2)] * 5 for i in range(num_rows)],
}
)
gdf = cudf.from_pandas(pdf_in)
    expect = pd.concat([pdf_in, pdf_in])
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
PeriodIndex,
Series,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float
class TestFillNA:
def test_fillna_datetime(self, datetime_frame):
tf = datetime_frame
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = datetime_frame.fillna(0)
assert (zero_filled.loc[zero_filled.index[:5], "A"] == 0).all()
padded = datetime_frame.fillna(method="pad")
assert np.isnan(padded.loc[padded.index[:5], "A"]).all()
assert (
padded.loc[padded.index[-5:], "A"] == padded.loc[padded.index[-5], "A"]
).all()
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
datetime_frame.fillna()
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_frame.fillna(5, method="ffill")
def test_fillna_mixed_type(self, float_string_frame):
mf = float_string_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
# TODO: make stronger assertion here, GH 25640
mf.fillna(value=0)
mf.fillna(method="pad")
def test_fillna_mixed_float(self, mixed_float_frame):
# mixed numeric (but no float16)
mf = mixed_float_frame.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype={"C": None})
result = mf.fillna(method="pad")
_check_mixed_float(result, dtype={"C": None})
def test_fillna_empty(self):
# empty frame (GH#2778)
df = DataFrame(columns=["x"])
for m in ["pad", "backfill"]:
df.x.fillna(method=m, inplace=True)
df.x.fillna(method=m)
def test_fillna_different_dtype(self):
# with different dtype (GH#3386)
df = DataFrame(
[["a", "a", np.nan, "a"], ["b", "b", np.nan, "b"], ["c", "c", np.nan, "c"]]
)
result = df.fillna({2: "foo"})
expected = DataFrame(
[["a", "a", "foo", "a"], ["b", "b", "foo", "b"], ["c", "c", "foo", "c"]]
)
tm.assert_frame_equal(result, expected)
return_value = df.fillna({2: "foo"}, inplace=True)
tm.assert_frame_equal(df, expected)
assert return_value is None
def test_fillna_limit_and_value(self):
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
tm.assert_frame_equal(result, expected)
def test_fillna_datelike(self):
# with datelike
# GH#6344
df = DataFrame(
{
"Date": [NaT, Timestamp("2014-1-1")],
"Date2": [Timestamp("2013-1-1"), NaT],
}
)
expected = df.copy()
expected["Date"] = expected["Date"].fillna(df.loc[df.index[0], "Date2"])
result = df.fillna(value={"Date": df["Date2"]})
tm.assert_frame_equal(result, expected)
def test_fillna_tzaware(self):
# with timezone
# GH#15855
df = DataFrame({"A": [Timestamp("2012-11-11 00:00:00+01:00"), NaT]})
exp = DataFrame(
{
"A": [
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
}
)
tm.assert_frame_equal(df.fillna(method="pad"), exp)
df = DataFrame({"A": [NaT, Timestamp("2012-11-11 00:00:00+01:00")]})
exp = DataFrame(
{
"A": [
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
}
)
tm.assert_frame_equal(df.fillna(method="bfill"), exp)
def test_fillna_tzaware_different_column(self):
# with timezone in another column
# GH#15522
df = DataFrame(
{
"A": date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1, 2, np.nan, np.nan],
}
)
result = df.fillna(method="pad")
expected = DataFrame(
{
"A": date_range("20130101", periods=4, tz="US/Eastern"),
"B": [1.0, 2.0, 2.0, 2.0],
}
)
tm.assert_frame_equal(result, expected)
def test_na_actions_categorical(self):
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = DataFrame({"cats": cat2, "vals": vals2})
        cat3 = Categorical([1, 2, 3], categories=[1, 2, 3])
#!/usr/bin/env python3
import pytest
import os
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
import math
import torch
from neuralprophet import NeuralProphet, set_random_seed
from neuralprophet import df_utils
log = logging.getLogger("NP.test")
log.setLevel("WARNING")
log.parent.setLevel("WARNING")
DIR = pathlib.Path(__file__).parent.parent.absolute()
DATA_DIR = os.path.join(DIR, "tests", "test-data")
PEYTON_FILE = os.path.join(DATA_DIR, "wp_log_peyton_manning.csv")
AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
YOS_FILE = os.path.join(DATA_DIR, "yosemite_temps.csv")
NROWS = 256
EPOCHS = 2
BATCH_SIZE = 64
LR = 1.0
PLOT = False
def test_names():
log.info("testing: names")
m = NeuralProphet()
m._validate_column_name("hello_friend")
def test_train_eval_test():
log.info("testing: Train Eval Test")
m = NeuralProphet(
n_lags=10,
n_forecasts=3,
ar_sparsity=0.1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
df = m._handle_missing_data(df, freq="D", predicting=False)
df_train, df_test = m.split_df(df, freq="D", valid_p=0.1)
metrics = m.fit(df_train, freq="D", validation_df=df_test)
val_metrics = m.test(df_test)
log.debug("Metrics: train/eval: \n {}".format(metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
log.debug("Metrics: test: \n {}".format(val_metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
def test_df_utils_func():
log.info("testing: df_utils Test")
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
# test find_time_threshold
df_dict, _ = df_utils.prep_copy_df_dict(df)
time_threshold = df_utils.find_time_threshold(df_dict, n_lags=2, valid_p=0.2, inputs_overbleed=True)
df_train, df_val = df_utils.split_considering_timestamp(
df_dict, n_lags=2, n_forecasts=2, inputs_overbleed=True, threshold_time_stamp=time_threshold
)
# init data params with a list
global_data_params = df_utils.init_data_params(df_dict, normalize="soft")
global_data_params = df_utils.init_data_params(df_dict, normalize="soft1")
global_data_params = df_utils.init_data_params(df_dict, normalize="standardize")
log.debug("Time Threshold: \n {}".format(time_threshold))
log.debug("Df_train: \n {}".format(type(df_train)))
log.debug("Df_val: \n {}".format(type(df_val)))
def test_trend():
log.info("testing: Trend")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
growth="linear",
n_changepoints=10,
changepoints_range=0.9,
trend_reg=1,
trend_reg_threshold=False,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_custom_changepoints():
log.info("testing: Custom Changepoints")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
dates = df["ds"][range(1, len(df) - 1, int(len(df) / 5.0))]
dates_list = [str(d) for d in dates]
dates_array = pd.to_datetime(dates_list).values
log.debug("dates: {}".format(dates))
log.debug("dates_list: {}".format(dates_list))
log.debug("dates_array: {} {}".format(dates_array.dtype, dates_array))
for cp in [dates_list, dates_array]:
m = NeuralProphet(
changepoints=cp,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_no_trend():
log.info("testing: No-Trend")
df = pd.read_csv(PEYTON_FILE, nrows=512)
m = NeuralProphet(
growth="off",
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_seasons():
log.info("testing: Seasonality: additive")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="additive",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("SUM of yearly season params: {}".format(sum(abs(m.model.season_params["yearly"].data.numpy()))))
log.debug("SUM of weekly season params: {}".format(sum(abs(m.model.season_params["weekly"].data.numpy()))))
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
log.info("testing: Seasonality: multiplicative")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
# m = NeuralProphet(n_lags=60, n_changepoints=10, n_forecasts=30, verbose=True)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="multiplicative",
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
def test_custom_seasons():
log.info("testing: Custom Seasonality")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
other_seasons = False
m = NeuralProphet(
yearly_seasonality=other_seasons,
weekly_seasonality=other_seasons,
daily_seasonality=other_seasons,
seasonality_mode="additive",
# seasonality_mode="multiplicative",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m = m.add_seasonality(name="quarterly", period=90, fourier_order=5)
log.debug("seasonalities: {}".format(m.season_config.periods))
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar():
log.info("testing: AR")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=7,
yearly_seasonality=False,
epochs=EPOCHS,
# batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_sparse():
log.info("testing: AR (sparse")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=3,
n_lags=14,
ar_sparsity=0.5,
yearly_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_deep():
log.info("testing: AR-Net (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg():
log.info("testing: Lagged Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=2,
n_lags=3,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(30, min_periods=1).mean()
m = m.add_lagged_regressor(names="A")
m = m.add_lagged_regressor(names="B", only_last_value=True)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=10)
forecast = m.predict(future)
if PLOT:
print(forecast.to_string())
m.plot_last_forecast(forecast, include_previous_forecasts=5)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg_deep():
log.info("testing: List of Lagged Regressors (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=1,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(15, min_periods=1).mean()
df["C"] = df["y"].rolling(30, min_periods=1).mean()
cols = [col for col in df.columns if col not in ["ds", "y"]]
m = m.add_lagged_regressor(names=cols)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
forecast = m.predict(df)
if PLOT:
# print(forecast.to_string())
# m.plot_last_forecast(forecast, include_previous_forecasts=10)
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_events():
log.info("testing: Events")
df = pd.read_csv(PEYTON_FILE)[-NROWS:]
playoffs = pd.DataFrame(
{
"event": "playoff",
"ds": pd.to_datetime(
[
"2008-01-13",
"2009-01-03",
"2010-01-16",
"2010-01-24",
"2010-02-07",
"2011-01-08",
"2013-01-12",
"2014-01-12",
"2014-01-19",
"2014-02-02",
"2015-01-11",
"2016-01-17",
"2016-01-24",
"2016-02-07",
]
),
}
)
superbowls = pd.DataFrame(
{
"event": "superbowl",
"ds": pd.to_datetime(["2010-02-07", "2014-02-02", "2016-02-07"]),
}
)
events_df = pd.concat((playoffs, superbowls))
m = NeuralProphet(
n_lags=2,
n_forecasts=30,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# set event windows
m = m.add_events(
["superbowl", "playoff"], lower_window=-1, upper_window=1, mode="multiplicative", regularization=0.5
)
# add the country specific holidays
m = m.add_country_holidays("US", mode="additive", regularization=0.5)
m.add_country_holidays("Indonesia")
m.add_country_holidays("Thailand")
m.add_country_holidays("Philippines")
m.add_country_holidays("Pakistan")
m.add_country_holidays("Belarus")
history_df = m.create_df_with_events(df, events_df)
metrics_df = m.fit(history_df, freq="D")
future = m.make_future_dataframe(df=history_df, events_df=events_df, periods=30, n_historic_predictions=90)
forecast = m.predict(df=future)
log.debug("Event Parameters:: {}".format(m.model.event_params))
if PLOT:
m.plot_components(forecast)
m.plot(forecast)
m.plot_parameters()
plt.show()
def test_future_reg():
log.info("testing: Future Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(30, min_periods=1).mean()
regressors_df_future = pd.DataFrame(data={"A": df["A"][-50:], "B": df["B"][-50:]})
df = df[:-50]
m = m.add_future_regressor(name="A")
m = m.add_future_regressor(name="B", mode="multiplicative")
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df=df, regressors_df=regressors_df_future, n_historic_predictions=10, periods=50)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_plot():
log.info("testing: Plotting")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=14,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=m.n_forecasts, n_historic_predictions=10)
forecast = m.predict(future)
m.plot(forecast)
m.plot_last_forecast(forecast, include_previous_forecasts=10)
m.plot_components(forecast)
m.plot_parameters()
m.highlight_nth_step_ahead_of_each_forecast(7)
forecast = m.predict(df)
m.plot(forecast)
m.plot_last_forecast(forecast, include_previous_forecasts=10)
m.plot_components(forecast)
m.plot_parameters()
if PLOT:
plt.show()
def test_air_data():
log.info("TEST air_passengers.csv")
df = pd.read_csv(AIR_FILE)
m = NeuralProphet(
n_changepoints=0,
yearly_seasonality=2,
seasonality_mode="multiplicative",
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics = m.fit(df, freq="MS")
future = m.make_future_dataframe(df, periods=48, n_historic_predictions=len(df) - m.n_lags)
forecast = m.predict(future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_random_seed():
log.info("TEST random seed")
df = pd.read_csv(PEYTON_FILE, nrows=512)
set_random_seed(0)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
checksum1 = sum(forecast["yhat1"].values)
set_random_seed(0)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
checksum2 = sum(forecast["yhat1"].values)
set_random_seed(1)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
checksum3 = sum(forecast["yhat1"].values)
log.debug("should be same: {} and {}".format(checksum1, checksum2))
log.debug("should not be same: {} and {}".format(checksum1, checksum3))
assert math.isclose(checksum1, checksum2)
assert not math.isclose(checksum1, checksum3)
def test_yosemite():
log.info("TEST Yosemite Temps")
df = pd.read_csv(YOS_FILE, nrows=NROWS)
m = NeuralProphet(
changepoints_range=0.95,
n_changepoints=15,
weekly_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics = m.fit(df, freq="5min")
future = m.make_future_dataframe(df, periods=12 * 24, n_historic_predictions=12 * 24)
forecast = m.predict(future)
if PLOT:
m.plot(forecast)
m.plot_parameters()
plt.show()
def test_model_cv():
log.info("CV from model")
def check_simple(df):
m = NeuralProphet(
learning_rate=LR,
)
folds = m.crossvalidation_split_df(df, freq="D", k=5, fold_pct=0.1, fold_overlap_pct=0.5)
assert all([70 + i * 5 == len(train) for i, (train, val) in enumerate(folds)])
assert all([10 == len(val) for (train, val) in folds])
def check_cv(df, freq, n_lags, n_forecasts, k, fold_pct, fold_overlap_pct):
m = NeuralProphet(
n_lags=n_lags,
n_forecasts=n_forecasts,
learning_rate=LR,
)
folds = m.crossvalidation_split_df(df, freq=freq, k=k, fold_pct=fold_pct, fold_overlap_pct=fold_overlap_pct)
total_samples = len(df) - m.n_lags + 2 - (2 * m.n_forecasts)
per_fold = int(fold_pct * total_samples)
not_overlap = per_fold - int(fold_overlap_pct * per_fold)
assert all([per_fold == len(val) - m.n_lags + 1 - m.n_forecasts for (train, val) in folds])
assert all(
[
total_samples - per_fold - (k - i - 1) * not_overlap == len(train) - m.n_lags + 1 - m.n_forecasts
for i, (train, val) in enumerate(folds)
]
)
check_simple(pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=100), "y": np.arange(100)}))
check_cv(
df=pd.DataFrame({"ds": | pd.date_range(start="2017-01-01", periods=100) | pandas.date_range |
import pandas as pd
import numpy as np
def groupby_reset(col):
colname = "'%s'" % col
df = (
        pd.groupby([colname])
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from pandas import DataFrame
class Helper_Functions:
def add_columns(self):
""" When activated it will add a column
into existing dataframe
"""
df = pd.DataFrame({10, 20, 40, 60, 70, 80, 80, 100, 110, 120})
names = pd.DataFrame({'Mario', 'Sonic', 'Sly', 'Duke', 'Tails', 'Luigi', 'CaptainMak', 'Crash', 'Knuckles', 'Tupac'})
return df
def train_validate_test(self):
""" Function will split data into
train and test sets for machine learning
"""
        df = pd.DataFrame({10, 20, 40, 60, 70, 80, 80, 100, 110})
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
from math import pi
from numpy import sign, nan, append, zeros, array, sqrt, where
from numpy import max as max_
from pandas import Series, DataFrame, concat
from pandapower.pypower.idx_gen import GEN_BUS, PMIN, PMAX, QMIN, QMAX, GEN_STATUS
from pandapower.pypower.idx_cost import COST, NCOST
from pandapower.pypower.idx_bus import BUS_I, BASE_KV
import pandapower as pp
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
try:
from pypower import ppoption, runpf, runopf, rundcpf, rundcopf
ppopt = ppoption.ppoption(VERBOSE=0, OUT_ALL=0)
pypower_import = True
except ImportError:
pypower_import = False
ppc_elms = ["bus", "branch", "gen"]
def _create_costs(net, ppc, gen_lookup, type, idx):
if ppc['gencost'][idx, 0] == 1:
if not len(ppc['gencost'][idx, COST:]) == 2*ppc['gencost'][idx, NCOST]:
logger.error("In gencost line %s, the number n does not fit to the number of values" %
idx)
raise NotImplementedError
pp.create_pwl_cost(net, gen_lookup.element.at[idx],
gen_lookup.element_type.at[idx],
ppc['gencost'][idx, 4:], type)
elif ppc['gencost'][idx, 0] == 2:
ncost = ppc['gencost'][idx, NCOST]
if ncost == 2:
cp2 = 0
cp1 = ppc['gencost'][idx, COST]
cp0 = ppc['gencost'][idx, COST + 1]
elif ncost == 3:
cp2 = ppc['gencost'][idx, COST]
cp1 = ppc['gencost'][idx, COST + 1]
cp0 = ppc['gencost'][idx, COST + 2]
pp.create_poly_cost(net, gen_lookup.element.at[idx], gen_lookup.element_type.at[idx],
cp1_eur_per_mw=cp1, cp2_eur_per_mw2=cp2, cp0_eur=cp0)
else:
logger.info("Cost mode of gencost line %s is unknown." % idx)
def _gen_bus_info(ppc, idx_gen):
bus_name = int(ppc["gen"][idx_gen, GEN_BUS])
# assumption: there is only one bus with this bus_name:
idx_bus = int(where(ppc["bus"][:, BUS_I] == bus_name)[0][0])
current_bus_type = int(ppc["bus"][idx_bus, 1])
same_bus_gen_idx = where(ppc["gen"][:, GEN_BUS] == ppc["gen"][idx_gen, GEN_BUS])[0].astype(int)
same_bus_in_service_gen_idx = same_bus_gen_idx[where(ppc["gen"][same_bus_gen_idx, GEN_STATUS] > 0)]
first_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[0] if len(
same_bus_in_service_gen_idx) else None
last_same_bus_in_service_gen_idx = same_bus_in_service_gen_idx[-1] if len(
same_bus_in_service_gen_idx) else None
return current_bus_type, idx_bus, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx
def from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs):
"""
This function converts pypower case files to pandapower net structure.
INPUT:
**ppc** : The pypower case file.
OPTIONAL:
**f_hz** (float, 50) - The frequency of the network.
**validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.
For running the validation, the ppc must already contain the pypower
powerflow results or pypower must be importable.
****kwargs** keyword arguments for validate_from_ppc if validate_conversion is True
OUTPUT:
**net** : pandapower net.
EXAMPLE:
import pandapower.converter as pc
from pypower import case4gs
ppc_net = case4gs.case4gs()
net = pc.from_ppc(ppc_net, f_hz=60)
"""
# --- catch common failures
if Series(ppc['bus'][:, BASE_KV] <= 0).any():
logger.info('There are false baseKV given in the pypower case file.')
# --- general_parameters
baseMVA = ppc['baseMVA'] # MVA
omega = pi * f_hz # 1/s
MAX_VAL = 99999.
net = pp.create_empty_network(f_hz=f_hz, sn_mva=baseMVA)
# --- bus data -> create buses, sgen, load, shunt
for i in range(len(ppc['bus'])):
# create buses
pp.create_bus(net, name=int(ppc['bus'][i, 0]), vn_kv=ppc['bus'][i, 9], type="b",
zone=ppc['bus'][i, 10], in_service=bool(ppc['bus'][i, 1] != 4),
max_vm_pu=ppc['bus'][i, 11], min_vm_pu=ppc['bus'][i, 12])
# create sgen, load
if ppc['bus'][i, 2] > 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
elif ppc['bus'][i, 2] < 0:
pp.create_sgen(net, i, p_mw=-ppc['bus'][i, 2], q_mvar=-ppc['bus'][i, 3],
type="", controllable=False)
elif ppc['bus'][i, 3] != 0:
pp.create_load(net, i, p_mw=ppc['bus'][i, 2], q_mvar=ppc['bus'][i, 3],
controllable=False)
# create shunt
if ppc['bus'][i, 4] != 0 or ppc['bus'][i, 5] != 0:
pp.create_shunt(net, i, p_mw=ppc['bus'][i, 4],
q_mvar=-ppc['bus'][i, 5])
# unused data of ppc: Vm, Va (partwise: in ext_grid), zone
# --- gen data -> create ext_grid, gen, sgen
gen_lookup = DataFrame(nan, columns=['element', 'element_type'],
index=range(len(ppc['gen'][:, 0])))
    # if there is only one gen in ppc -> numpy initially uses a one-dim array -> change to a two-dim array
if len(ppc["gen"].shape) == 1:
ppc["gen"] = array(ppc["gen"], ndmin=2)
for i in range(len(ppc['gen'][:, 0])):
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc, i)
# create ext_grid
if current_bus_type == 3:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_ext_grid(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
va_degree=ppc['bus'][current_bus_idx, 8], in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'ext_grid'
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
# create gen
elif current_bus_type == 2:
if i == first_same_bus_in_service_gen_idx:
gen_lookup.element.loc[i] = pp.create_gen(
net, bus=current_bus_idx, vm_pu=ppc['gen'][last_same_bus_in_service_gen_idx, 5],
p_mw=ppc['gen'][i, 1],
in_service=bool(ppc['gen'][i, 7] > 0), controllable=True,
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN])
gen_lookup.element_type.loc[i] = 'gen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of gen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
else:
current_bus_type = 1
# create sgen
if current_bus_type == 1:
gen_lookup.element.loc[i] = pp.create_sgen(
net, bus=current_bus_idx, p_mw=ppc['gen'][i, 1],
q_mvar=ppc['gen'][i, 2], type="", in_service=bool(ppc['gen'][i, 7] > 0),
max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],
max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN],
controllable=True)
gen_lookup.element_type.loc[i] = 'sgen'
if ppc['gen'][i, 1] < 0:
logger.info('p_mw of sgen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)
# unused data of ppc: Vg (partwise: in ext_grid and gen), mBase, Pc1, Pc2, Qc1min, Qc1max,
# Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30,ramp_q, apf
# --- branch data -> create line, trafo
for i in range(len(ppc['branch'])):
from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 1]))
from_vn_kv = ppc['bus'][from_bus, 9]
to_vn_kv = ppc['bus'][to_bus, 9]
if (from_vn_kv == to_vn_kv) & ((ppc['branch'][i, 8] == 0) | (ppc['branch'][i, 8] == 1)) & \
(ppc['branch'][i, 9] == 0): # create line
Zni = ppc['bus'][to_bus, 9]**2/baseMVA # ohm
max_i_ka = ppc['branch'][i, 5]/ppc['bus'][to_bus, 9]/sqrt(3)
if max_i_ka == 0.0:
max_i_ka = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"maximum branch flow")
pp.create_line_from_parameters(
net, from_bus=from_bus, to_bus=to_bus, length_km=1,
r_ohm_per_km=ppc['branch'][i, 2]*Zni, x_ohm_per_km=ppc['branch'][i, 3]*Zni,
c_nf_per_km=ppc['branch'][i, 4]/Zni/omega*1e9/2,
max_i_ka=max_i_ka, type='ol', max_loading_percent=100,
in_service=bool(ppc['branch'][i, 10]))
else: # create transformer
if from_vn_kv >= to_vn_kv:
hv_bus = from_bus
vn_hv_kv = from_vn_kv
lv_bus = to_bus
vn_lv_kv = to_vn_kv
tap_side = 'hv'
else:
hv_bus = to_bus
vn_hv_kv = to_vn_kv
lv_bus = from_bus
vn_lv_kv = from_vn_kv
tap_side = 'lv'
if from_vn_kv == to_vn_kv:
logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered'
' as a transformer because of a ratio != 0 | 1 but it connects '
'the same voltage level', i, ppc['branch'][i, 0],
ppc['branch'][i, 1])
rk = ppc['branch'][i, 2]
xk = ppc['branch'][i, 3]
zk = (rk ** 2 + xk ** 2) ** 0.5
sn = ppc['branch'][i, 5]
if sn == 0.0:
sn = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"apparent power")
ratio_1 = 0 if ppc['branch'][i, 8] == 0 else (ppc['branch'][i, 8] - 1) * 100
i0_percent = -ppc['branch'][i, 4] * 100 * baseMVA / sn
if i0_percent < 0:
                logger.info('A transformer always consumes reactive power (inductive '
                            'behaviour), but the susceptance of pypower branch %d '
                            '(from_bus, to_bus)=(%d, %d) is positive.',
                            i, ppc['branch'][i, 0], ppc['branch'][i, 1])
pp.create_transformer_from_parameters(
net, hv_bus=hv_bus, lv_bus=lv_bus, sn_mva=sn, vn_hv_kv=vn_hv_kv,
vn_lv_kv=vn_lv_kv, vk_percent=sign(xk) * zk * sn * 100 / baseMVA,
vkr_percent=rk * sn * 100 / baseMVA, max_loading_percent=100,
pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][i, 9],
tap_step_percent=abs(ratio_1) if ratio_1 else nan,
tap_pos=sign(ratio_1) if ratio_1 else nan,
tap_side=tap_side if ratio_1 else None, tap_neutral=0 if ratio_1 else nan)
# unused data of ppc: rateB, rateC
# --- gencost -> create polynomial_cost, piecewise_cost
if 'gencost' in ppc:
if len(ppc['gencost'].shape) == 1:
# reshape gencost if only one gencost is given -> no indexError
ppc['gencost'] = ppc['gencost'].reshape((1, -1))
if ppc['gencost'].shape[0] <= gen_lookup.shape[0]:
idx_p = range(ppc['gencost'].shape[0])
idx_q = []
elif ppc['gencost'].shape[0] > gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0])
if ppc['gencost'].shape[0] >= 2*gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], 2*gen_lookup.shape[0])
for idx in idx_p:
_create_costs(net, ppc, gen_lookup, 'p', idx)
for idx in idx_q:
_create_costs(net, ppc, gen_lookup, 'q', idx)
# areas are unconverted
if validate_conversion:
logger.setLevel(logging.DEBUG)
if not validate_from_ppc(ppc, net, **kwargs):
logger.error("Validation failed.")
return net
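# Checks that the absolute differences between pypower and pandapower results
# stay below the thresholds given in max_diff_values (bus vm/va, branch p/q,
# gen p/q).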
def _validate_diff_res(diff_res, max_diff_values):
to_iterate = set(max_diff_values.keys()) & {'gen_q_mvar', 'branch_p_mw', 'branch_q_mvar',
'gen_p_mw', 'bus_va_degree', 'bus_vm_pu'}
if not len(to_iterate):
logger.warning("There are no keys to validate.")
val = True
for i in to_iterate:
elm = i.split("_")[0]
sought = ["p", "q"] if elm != "bus" else ["vm", "va"]
col = int(array([0, 1])[[j in i for j in sought]][0]) if elm != "branch" else \
list(array([[0, 2], [1, 3]])[[j in i for j in sought]][0])
val &= bool(max_(abs(diff_res[elm][:, col])) < max_diff_values[i])
return val
def validate_from_ppc(ppc_net, net, pf_type="runpp", max_diff_values={
"bus_vm_pu": 1e-6, "bus_va_degree": 1e-5, "branch_p_mw": 1e-6, "branch_q_mvar": 1e-6,
"gen_p_mw": 1e-6, "gen_q_mvar": 1e-6}, run=True):
"""
This function validates the pypower case files to pandapower net structure conversion via a \
comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)
INPUT:
**ppc_net** - The pypower case file, which must already contain the pypower powerflow
results or pypower must be importable.
**net** - The pandapower network.
OPTIONAL:
**pf_type** ("runpp", string) - Type of validated power flow. Possible are ("runpp",
"rundcpp", "runopp", "rundcopp")
        **max_diff_values** - Dict of maximal allowed difference values. The keys must be
            'bus_vm_pu', 'bus_va_degree', 'branch_p_mw', 'branch_q_mvar', 'gen_p_mw' and
            'gen_q_mvar' and the values floats.
**run** (True, bool or list of two bools) - changing the value to False avoids trying to run
(optimal) loadflows. Giving a list of two bools addresses first pypower and second
pandapower.
OUTPUT:
**conversion_success** - conversion_success is returned as False if pypower or pandapower
cannot calculate a powerflow or if the maximum difference values (max_diff_values )
cannot be hold.
EXAMPLE:
import pandapower.converter as pc
        net = pc.from_ppc(ppc_net, f_hz=50)
        conversion_success = pc.validate_from_ppc(ppc_net, net)
NOTE:
The user has to take care that the loadflow results already are included in the provided \
ppc_net or pypower is importable.
"""
# check in case of optimal powerflow comparison whether cost information exist
if "opp" in pf_type:
if not (len(net.polynomial_cost) | len(net.piecewise_linear_cost)):
if "gencost" in ppc_net:
if not len(ppc_net["gencost"]):
logger.debug('ppc and pandapower net do not include cost information.')
return True
else:
logger.error('The pandapower net does not include cost information.')
return False
else:
logger.debug('ppc and pandapower net do not include cost information.')
return True
# guarantee run parameter as list, for pypower and pandapower (optimal) powerflow run
run = [run, run] if isinstance(run, bool) else run
# --- check pypower powerflow success, if possible
if pypower_import and run[0]:
try:
if pf_type == "runpp":
ppc_net = runpf.runpf(ppc_net, ppopt)[0]
elif pf_type == "rundcpp":
ppc_net = rundcpf.rundcpf(ppc_net, ppopt)[0]
elif pf_type == "runopp":
ppc_net = runopf.runopf(ppc_net, ppopt)
elif pf_type == "rundcopp":
ppc_net = rundcopf.rundcopf(ppc_net, ppopt)
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
except:
logger.debug("The pypower run did not work.")
ppc_success = True
if 'success' in ppc_net.keys():
if ppc_net['success'] != 1:
ppc_success = False
logger.error("The given ppc data indicates an unsuccessful pypower powerflow: " +
"'ppc_net['success'] != 1'")
if (ppc_net['branch'].shape[1] < 17):
ppc_success = False
logger.error("The shape of given ppc data indicates missing pypower powerflow results.")
# --- try to run a pandapower powerflow
if run[1]:
if pf_type == "runpp":
try:
pp.runpp(net, init="dc", calculate_voltage_angles=True, trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, calculate_voltage_angles=True, init="flat", trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(net, trafo_model="pi", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except pp.LoadflowNotConverged:
logger.error('The pandapower powerflow does not converge.')
elif pf_type == "rundcpp":
try:
pp.rundcpp(net, trafo_model="pi")
except pp.LoadflowNotConverged:
logger.error('The pandapower dc powerflow does not converge.')
elif pf_type == "runopp":
try:
pp.runopp(net, init="flat", calculate_voltage_angles=True)
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=True)
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
try:
pp.runopp(net, init="flat", calculate_voltage_angles=False)
logger.info("voltage_angles could be calculated.")
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
except pp.OPFNotConverged:
try:
pp.runopp(net, init="pf", calculate_voltage_angles=False)
if "bus_va_degree" in max_diff_values.keys():
max_diff_values["bus_va_degree"] = 1e2 if max_diff_values[
"bus_va_degree"] < 1e2 else max_diff_values["bus_va_degree"]
logger.info("voltage_angles could be calculated.")
except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError):
logger.error('The pandapower optimal powerflow does not converge.')
elif pf_type == "rundcopp":
try:
pp.rundcopp(net)
except pp.LoadflowNotConverged:
logger.error('The pandapower dc optimal powerflow does not converge.')
else:
raise ValueError("The pf_type %s is unknown" % pf_type)
# --- prepare powerflow result comparison by reordering pp results as they are in ppc results
if not ppc_success:
return False
if "opp" in pf_type:
if not net.OPF_converged:
return
elif not net.converged:
return False
# --- store pypower powerflow results
ppc_res = dict.fromkeys(ppc_elms)
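    # pypower result columns: branch PF/QF/PT/QT (13:17), bus VM/VA (7:9),
    # gen PG/QG (1:3)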
ppc_res["branch"] = ppc_net['branch'][:, 13:17]
ppc_res["bus"] = ppc_net['bus'][:, 7:9]
ppc_res["gen"] = ppc_net['gen'][:, 1:3]
# --- pandapower bus result table
pp_res = dict.fromkeys(ppc_elms)
pp_res["bus"] = array(net.res_bus.sort_index()[['vm_pu', 'va_degree']])
# --- pandapower gen result table
pp_res["gen"] = zeros([1, 2])
    # handle parallel generators by tracking how many generators at each bus
    # have already been assigned
    # if there is only one gen in ppc -> numpy initially uses a one-dim array -> change to a two-dim array
if len(ppc_net["gen"].shape) == 1:
ppc_net["gen"] = array(ppc_net["gen"], ndmin=2)
GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int))
GEN_uniq = GENS.drop_duplicates()
already_used_gen = Series(zeros(GEN_uniq.shape[0]).astype(int),
index=[int(v) for v in GEN_uniq.values])
change_q_compare = []
for i, j in GENS.iterrows():
current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, \
last_same_bus_in_service_gen_idx = _gen_bus_info(ppc_net, i)
if current_bus_type == 3 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_ext_grid[
net.ext_grid.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
elif current_bus_type == 2 and i == first_same_bus_in_service_gen_idx:
pp_res["gen"] = append(pp_res["gen"], array(net.res_gen[
net.gen.bus == current_bus_idx][['p_mw', 'q_mvar']]).reshape((1, 2)), 0)
else:
pp_res["gen"] = append(pp_res["gen"], array(net.res_sgen[
net.sgen.bus == current_bus_idx][['p_mw', 'q_mvar']])[
already_used_gen.at[int(j)]].reshape((1, 2)), 0)
already_used_gen.at[int(j)] += 1
change_q_compare += [int(j)]
pp_res["gen"] = pp_res["gen"][1:, :] # delete initial zero row
# --- pandapower branch result table
pp_res["branch"] = zeros([1, 4])
    # handle parallel branches by tracking how often each node-to-node
    # connection has already been considered
try:
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1,
sort=True).drop_duplicates()
init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1,
sort=True).drop_duplicates()
except TypeError:
# legacy pandas < 0.21
init1 = concat([net.line.from_bus, net.line.to_bus], axis=1).drop_duplicates()
        init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1).drop_duplicates()
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
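# Reset clears all module-level state (data, models, counters) so that a new
# analysis session starts from scratch.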
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
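# e.g. Convert(['a', 1, 'b', 2]) -> {'a': 1, 'b': 2}: pairs consecutive list elements into a dict.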
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Sent data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
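# e.g. column_index(df, ['colB', 'colA']) -> positional indices of those columns in df.columns.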
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
            zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances) # one entry per feature (M), not per sample
)
return out
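# Hypothetical usage sketch: with a fitted tree ensemble `clf`,
# class_feature_importance(XData.values, np.array(yData), clf.feature_importances_)
# returns {class: {feature_index: score}}, i.e. the mean standardized feature value per
# class weighted by the global feature importances.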
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
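    # 'blend' evaluates on a single 80/20 shuffle split; any other mode falls back to 5-fold cross-validation.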
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
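# joblib disk cache: repeated calls to GridSearchForModels with identical arguments are
# served from ./cachedir instead of re-running the grid search.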
# calculating for all algorithms and models the performance and other results
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
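    # df_cv_results.iloc[0][0] is the first cv_results_ array (one entry per hyperparameter
    # combination), so its length equals the number of candidate models evaluated.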
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
    maxLog = max(resultsLogLoss)
    minLog = min(resultsLogLoss)
    rangeLog = maxLog - minLog
    for each in resultsLogLoss:
        resultsLogLossFinal.append((each-minLog)/rangeLog if rangeLog else 0.0)
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
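    # metrics now holds the 8 mean_test_* columns plus the 14 inserted ones (22 in total),
    # matching the metric columns of the commented-out CSV header in RetrieveModel above.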
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
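# e.g. Remove([1.0, 1.0, float('nan'), 'a']) -> [1.0, 'a']: drops duplicates and NaNs while preserving order.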
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = | pd.DataFrame.from_dict(dicLDA) | pandas.DataFrame.from_dict |
# create dataframes based on the available data
import json
import os
import geopandas as gpd
import pandas as pd
import rasterio
import rasterstats
import time
import random
def generate_dataframe(shapefile, raster):
# Read the shapefile and convert its crs
districts = gpd.read_file(shapefile)
s = shapefile.split("/")
e = s[6].split("_")
nuts = e[1].split(".")[0]
reg = e[0]
    # only needed for the Europe-wide shapefile, which uses NAME/NUTS instead of GEN/SHN
try:
districts["GEN"]=districts["NAME"]
districts["SHN"]=districts["NUTS"]
except:
pass
# districts.drop(["GEN"], axis=1)
# districts["GEN"]= districts["POPULATION"]
districts = districts.to_crs('epsg:4326')
# convert it into a geoJson file
districts.to_file("./data/geojsonFiles/geojson_{}_{}".format(reg, nuts), driver="GeoJSON", encoding="utf-8")
with open("./data/geojsonFiles/geojson_{}_{}".format(reg, nuts), encoding="utf-8") as geofile:
geojson_layer = json.load(geofile)
# concordance between the df and the geojson file based on an 'id' key
state_id_map = {}
for feature in geojson_layer['features']:
feature['id'] = feature['properties']['GEN']
state_id_map[feature['properties']['SHN']] = feature['id']
districts['id'] = districts['SHN'].apply(lambda x: state_id_map[x])
# import the raster file
rf = rasterio.open(raster, mode='r')
# get the stats from the raster and the shapefile
# Assign raster values to a numpy nd array
polluant = rf.read(1)
affine = rf.transform
# Calculating the zonal statistics
avg_pl = rasterstats.zonal_stats(districts,
polluant,
affine=affine,
stats=['mean', 'min', 'max', 'std'],
geojson_out=True)
# Extracting the average data from the list
avg_poll = []
i = 0
while i < len(avg_pl):
avg_poll.append(avg_pl[i]['properties'])
i = i + 1
# Transfering the infromation from the list to a pandas DataFrame
avg_pl_gr = pd.DataFrame(avg_poll)
districts["mean"] = avg_pl_gr["mean"]
districts["min"] = avg_pl_gr["min"]
districts["max"] = avg_pl_gr["max"]
districts["std"] = avg_pl_gr["std"]
districts ["nuts"] = nuts
districts ["zone"] = reg
districts ["COVID Cases"] = random.randint(10000,30000)
keep = ["SHN", "GEN", "mean", "min", "max", "std", "nuts", "zone", "id", "COVID Cases"]
districts = districts[[c for c in districts.columns if c in keep]]
return districts, geojson_layer
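# Example usage (hypothetical paths, shown only to illustrate the expected directory layout):
# districts, geojson_layer = generate_dataframe(
#     './data/rawData/germany/shapefiles/nuts3/germany_nuts3.shp',
#     './data/rawData/germany/polluant/NO2/NO2_2020_03.tif')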
def rem_dir(saving_path, p, n):
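    '''Write the list of file names found in directory p to <saving_path>/<n>.xlsx.'''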
names = os.listdir(p)
names = pd.DataFrame(names)
names.to_excel("{}/{}.xlsx".format(saving_path, n), index=False)
return
def create_csvs(path):
zones = os.listdir(path)
rem_dir(path, path, "ancienCountries")
for zone in zones:
if ".xlsx" not in zone:
nuts1 = os.listdir(('{}/{}/shapefiles/nuts1').format(path, zone))
nuts2 = os.listdir(('{}/{}/shapefiles/nuts2').format(path, zone))
nuts3 = os.listdir(('{}/{}/shapefiles/nuts3').format(path, zone))
poll = os.listdir(('{}/{}/polluant').format(path, zone))
#
rem_dir(('{}/{}/shapefiles/nuts1').format(path, zone),
('{}/{}/shapefiles/nuts1').format(path, zone),
"{}_nuts1".format(zone))
rem_dir(('{}/{}/shapefiles/nuts2').format(path, zone),
('{}/{}/shapefiles/nuts2').format(path, zone),
"{}_nuts2".format(zone))
rem_dir(('{}/{}/shapefiles/nuts3').format(path, zone),
('{}/{}/shapefiles/nuts3').format(path, zone),
"{}_nuts3".format(zone))
rem_dir(('{}/{}/polluant').format(path, zone),
('{}/{}/polluant').format(path, zone),
"{}_polluants".format(zone))
#
for p in poll:
if ".xlsx" not in p:
ppath = os.path.join(
('{}/{}/polluant/').format(path, zone), p)
pelement = os.listdir(ppath)
rem_dir(('{}/{}/polluant/{}').format(path, zone, p),
('{}/{}/polluant/{}').format(path, zone, p),
"{}_polluants_{}".format(zone, p))
for tif in pelement:
if ".xlsx" not in tif:
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
for n in [nuts1, nuts2, nuts3]:
if n == nuts1:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts1/{}'
).format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel((
'./data/processedData/{}_{}_nuts1_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
elif n == nuts2:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts2/{}'
).format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel((
'./data/processedData/{}_{}_nuts2_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
else:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts3/{}'
).format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel((
'./data/processedData/{}_{}_nuts3_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
return
def generate_from_countries(path, zone):
'''
This function allows the generation of csvs based on a given path and specific region
'''
nuts1 = os.listdir(('{}/{}/shapefiles/nuts1').format(path, zone))
nuts2 = os.listdir(('{}/{}/shapefiles/nuts2').format(path, zone))
nuts3 = os.listdir(('{}/{}/shapefiles/nuts3').format(path, zone))
poll = os.listdir(('{}/{}/polluant').format(path, zone))
#
rem_dir(('{}/{}/shapefiles/nuts1').format(path, zone),
('{}/{}/shapefiles/nuts1').format(path,
zone), "{}_nuts1".format(zone))
rem_dir(('{}/{}/shapefiles/nuts2').format(path, zone),
('{}/{}/shapefiles/nuts2').format(path,
zone), "{}_nuts2".format(zone))
rem_dir(('{}/{}/shapefiles/nuts3').format(path, zone),
('{}/{}/shapefiles/nuts3').format(path,
zone), "{}_nuts3".format(zone))
rem_dir(('{}/{}/polluant').format(path, zone),
('{}/{}/polluant').format(path, zone), "{}_polluants".format(zone))
#
for p in poll:
ppath = os.path.join(('{}/{}/polluant/').format(path, zone), p)
pelement = os.listdir(ppath)
rem_dir(('{}/{}/polluant/{}').format(path, zone, p),
('{}/{}/polluant/{}').format(path, zone, p),
"{}_polluants_{}".format(zone, p))
for tif in pelement:
if ".xlsx" not in tif:
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
for n in [nuts1, nuts2, nuts3]:
if n == nuts1:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts1/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts1_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
elif n == nuts2:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts2/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts2_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
else:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts3/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts3_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
return
def generate_from_polluant(path, zone, polluant):
pelement = os.listdir(('{}/{}/polluant/{}').format(path, zone, polluant))
nuts1 = os.listdir(('{}/{}/shapefiles/nuts1').format(path, zone))
nuts2 = os.listdir(('{}/{}/shapefiles/nuts2').format(path, zone))
nuts3 = os.listdir(('{}/{}/shapefiles/nuts3').format(path, zone))
for tif in pelement:
if ".xlsx" not in tif:
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
for n in [nuts1, nuts2, nuts3]:
if n == nuts1:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts1/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, polluant, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = polluant
dataframe.to_excel(
('./data/processedData/{}_{}_nuts1_{}_{}.xlsx'
).format(zone, polluant, year, month))
except:
pass
elif n == nuts2:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts2/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, polluant, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = polluant
dataframe.to_excel(
('./data/processedData/{}_{}_nuts2_{}_{}.xlsx'
).format(zone, polluant, year, month))
except:
pass
else:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts3/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, polluant, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = polluant
dataframe.to_excel(
('./data/processedData/{}_{}_nuts3_{}_{}.xlsx'
).format(zone, polluant, year, month))
except:
pass
return
def generate_from_nuts(n, path, zone, nutfile):
poll = os.listdir(('{}/{}/polluant').format(path, zone))
for p in poll:
if ".xlsx" not in p:
ppath = os.path.join(('{}/{}/polluant/').format(path, zone), p)
pelement = os.listdir(ppath)
for tif in pelement:
if ".xlsx" not in tif:
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
#try:
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/{}/{}').format(
path, zone, n, nutfile),
('{}/{}/polluant/{}/{}').format(path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_{}_{}_{}.xlsx').format(
zone, p, n, year, month))
#except:
# pass
return
def generate_from_tif(path, zone, p, tif):
nuts1 = os.listdir(('{}/{}/shapefiles/nuts1').format(path, zone))
nuts2 = os.listdir(('{}/{}/shapefiles/nuts2').format(path, zone))
nuts3 = os.listdir(('{}/{}/shapefiles/nuts3').format(path, zone))
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
for n in [nuts1, nuts2, nuts3]:
if n == nuts1:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts1/{}').format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts1_{}_{}.xlsx').format(
zone, p, year, month))
except:
pass
elif n == nuts2:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts2/{}').format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts2_{}_{}.xlsx').format(
zone, p, year, month))
except:
pass
else:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts3/{}').format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts3_{}_{}.xlsx').format(
zone, p, year, month))
except:
pass
return
def check_new_countries(path):
'''
    This function checks the countries directory and generates csvs for newly added regions
'''
zones = os.listdir(path)
ancien = | pd.read_excel("./data/rawData/ancienCountries.xlsx") | pandas.read_excel |
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
import datetime
now = datetime.datetime.now()
spotrac_ids = ['Boston Red Sox BOS', 'San Francisco Giants SF', 'Chicago Cubs CHC',
'Washington Nationals WSH', 'Los Angeles Dodgers LAD',
'Los Angeles Angels LAA', 'New York Yankees NYY', 'Toronto Blue Jays TOR',
'St. Louis Cardinals STL', 'New York Mets NYM', 'Houston Astros HOU',
'Seattle Mariners SEA', 'Texas Rangers TEX', 'Baltimore Orioles BAL',
'Colorado Rockies COL', 'Detroit Tigers DET', 'Cleveland Indians CLE',
'Arizona Diamondbacks ARI', 'Kansas City Royals KC', 'Minnesota Twins MIN',
'Atlanta Braves ATL', 'Philadelphia Phillies PHI', 'Miami Marlins MIA',
'San Diego Padres SD', 'Cincinnati Reds CIN', 'Pittsburgh Pirates PIT',
'Milwaukee Brewers MIL', 'Tampa Bay Rays TB', 'Oakland Athletics OAK',
'Chicago White Sox CHW']
fangraphs_ids = ['Red Sox','Giants','Cubs','Nationals','Dodgers','Angels',
'Yankees','Blue Jays','Cardinals','Mets','Astros','Mariners','Rangers',
'Orioles','Rockies','Tigers','Indians','Diamondbacks','Royals','Twins',
'Braves','Phillies','Marlins','Padres','Reds','Pirates','Brewers','Rays',
'Athletics','White Sox']
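# spotrac_ids and fangraphs_ids are index-aligned so Spotrac team labels can be translated
# to FanGraphs team names positionally (see the replacement loop in TeamLeaderboard.__init__).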
class TeamLeaderboard:
def __init__(self,year):
if (int(year) < 2011):
raise ValueError('Incomplete or missing data from this year! Please try another year.')
pd.options.mode.chained_assignment = None #this keeps my console from getting spammed with stupid messages
team_url = ('https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=0&type=c,53,11,6,111,199,3,58&season=%s&month=0&season1=%s&ind=0&team=0,ts&rost=&age=&filter=&players=0' % (str(year),str(year)))
team_page = requests.get(team_url)
team_soup = bs(team_page.content, 'html.parser')
table = team_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="LeaderBoard1_dg1_ctl00")
df = pd.read_html(str(table),header=1)
df = df[0]
df = df.iloc[:-1]
df['PA'] = pd.to_numeric(df['PA'], downcast="float")
        if (min(list(df['PA'])) >= 225): #require at least ~225 PA per team, i.e. roughly a week of play
#This part grabs and attaches bullpen xFIP data to the main DF
bullpen_url = ('https://www.fangraphs.com/leaders.aspx?pos=all&stats=rel&lg=all&qual=0&type=c,6,62&season=%s&month=0&season1=%s&ind=0&team=0,ts&rost=0&age=0&filter=&players=0' % (str(year),str(year)))
bullpen_page = requests.get(bullpen_url)
bullpen_soup = bs(bullpen_page.content, 'html.parser')
table = bullpen_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="LeaderBoard1_dg1_ctl00")
bp_df = pd.read_html(str(table),header=1)
bp_df = bp_df[0]
bp_df = bp_df.iloc[:-1]
df = df.merge(bp_df, on='Team')
#This part, a pain in the ass, attaches payroll data from SportTrac to the main DF
payroll_url = 'https://www.spotrac.com/mlb/payroll/'
payroll_page = requests.get(payroll_url)
payroll_soup = bs(payroll_page.content,'lxml')
table = payroll_soup.find_all('table')[0]
pr_df = pd.read_html(str(table),header=0)
pr_df = pr_df[0]
            pr_df = pr_df.loc[pr_df['Team'] != 'League Average'] #removes the "League Average" rows
for index in range(0,len(spotrac_ids)): #replaces spotrac ids with FanGraphs IDs
pr_df['Team'].loc[pr_df['Team'] == spotrac_ids[index]] = fangraphs_ids[index]
df = df.merge(pr_df, on='Team')
#grabs pitching WAR and Wins to determine team luckiness
pitch_url = ('https://www.fangraphs.com/leaders.aspx?pos=all&stats=pit&lg=all&qual=0&type=c,59,4&season=%s&month=0&season1=%s&ind=0&team=0,ts&rost=0&age=0&filter=&players=0' % (str(year),str(year)))
pitch_page = requests.get(pitch_url)
pitch_soup = bs(pitch_page.content, 'html.parser')
table = pitch_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="LeaderBoard1_dg1_ctl00")
pit_df = pd.read_html(str(table),header=1)
pit_df = pit_df[0]
pit_df = pit_df.iloc[:-1]
df = df.merge(pit_df, on='Team')
#grabs park factors for adjusting HR rate
pf_url = ('https://www.fangraphs.com/guts.aspx?type=pf&teamid=0&season=%s' % (str(year)))
pf_page = requests.get(pf_url)
pf_soup = bs(pf_page.content,'html.parser')
table = pf_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="GutsBoard1_dg1_ctl00")
pf_df = pd.read_html(str(table),header=0)
pf_df = pf_df[0]
df = df.merge(pf_df, on='Team')
#calculates NERD
df['Bat'] = pd.to_numeric(df['Bat'], downcast="float")
df['HR_x'] = pd.to_numeric(df['HR_x'], downcast="float")
df['HR_y'] = pd.to_numeric(df['HR_y'], downcast="float")
df['BsR'] = pd.to_numeric(df['BsR'], downcast="float")
df['xFIP'] = pd.to_numeric(df['xFIP'], downcast="float")
df['Def'] = pd.to_numeric(df['Def'], downcast="float")
df['Total Payroll'] = df[str(now.year) + ' Total Payroll'].replace('[\$,]', '', regex=True).astype(float)
df['Total Payroll'] = pd.to_numeric(df['Total Payroll'], downcast="float")
df['Age'] = pd.to_numeric(df['Age'], downcast="float")
df['WAR_x'] = pd.to_numeric(df['WAR_x'], downcast="float")
df['WAR_y'] = pd.to_numeric(df['WAR_y'], downcast="float")
df['W'] = pd.to_numeric(df['W'], downcast="float")
df['zBat'] = (df['Bat'] - df['Bat'].mean())/df['Bat'].std(ddof=0) #Finds Z-score values for calculation
df['HRpPA'] = df['HR_x'] / df['PA'] #calculates HR per PA per team
df['HRpPA'] = df['HRpPA'] * df['HR_y'] / 100 #adjusts for park factors
df['zHRpPA'] = (df['HRpPA'] - df['HRpPA'].mean())/df['HRpPA'].std(ddof=0)
df['zBsR'] = (df['BsR'] - df['BsR'].mean())/df['BsR'].std(ddof=0)
df['zBull'] = -((df['xFIP'] - df['xFIP'].mean())/df['xFIP'].std(ddof=0))
df['zDef'] = (df['Def'] - df['Def'].mean())/df['Def'].std(ddof=0)
df['Total Payroll'] = df['Total Payroll'].replace('[\$,]', '', regex=True).astype(float) #converts dollar amounts to numbers
df['zPay'] = -((df['Total Payroll'] - df['Total Payroll'].mean())/df['Total Payroll'].std(ddof=0))
df['zAge'] = -((df['Age'] - df['Age'].mean())/df['Age'].std(ddof=0))
df['tWAR'] = df['WAR_x'] + df['WAR_y']
df['Luck'] = (df['tWAR'] - df['W'])/20 #Adjusted for calculation
            df['zPay'][df['zPay'] < 0] = 0 #clip negative z-scores to zero; the chained assignment would normally raise pandas SettingWithCopy warnings, silenced above
df['zAge'][df['zAge'] < 0] = 0
df['Luck'][df['Luck'] < 0] = 0
df['Luck'][df['Luck'] > 2] = 2
df['NERD'] = df['zBat'] + df['zHRpPA'] + df['zBsR'] + (df['zBull'] / 2) + (df['zDef'] / 2) + df['zPay'] + df['zAge'] + df['Luck'] #unadjusted NERD
df['NERD'] = (((df['NERD'] - min(list(df['NERD']))) * (10)) / (max(list(df['NERD'])) - min(list(df['NERD'])))) #feature scaled
self.df = df
self.year = year
        else: #Same computation as above, but run against the previous season when the current season has too few plate appearances.
pd.options.mode.chained_assignment = None #this keeps my console from getting spammed with stupid messages
team_url = ('https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=0&type=c,53,11,6,111,199,3,58&season=%s&month=0&season1=%s&ind=0&team=0,ts&rost=&age=&filter=&players=0' % (str(year-1),str(year-1)))
team_page = requests.get(team_url)
team_soup = bs(team_page.content, 'html.parser')
table = team_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="LeaderBoard1_dg1_ctl00")
df = pd.read_html(str(table),header=1)
df = df[0]
df = df.iloc[:-1]
bullpen_url = ('https://www.fangraphs.com/leaders.aspx?pos=all&stats=rel&lg=all&qual=0&type=c,6,62&season=%s&month=0&season1=%s&ind=0&team=0,ts&rost=0&age=0&filter=&players=0' % (str(year-1),str(year-1)))
bullpen_page = requests.get(bullpen_url)
bullpen_soup = bs(bullpen_page.content, 'html.parser')
table = bullpen_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="LeaderBoard1_dg1_ctl00")
bp_df = pd.read_html(str(table),header=1)
bp_df = bp_df[0]
bp_df = bp_df.iloc[:-1]
df = df.merge(bp_df, on='Team')
#This part, a pain in the ass, attaches payroll data from SportTrac to the main DF
            payroll_url = 'https://www.spotrac.com/mlb/payroll/' #note: this page only lists current-season payroll, so figures attached to previous seasons may be off
payroll_page = requests.get(payroll_url)
payroll_soup = bs(payroll_page.content,'lxml')
table = payroll_soup.find_all('table')[0]
pr_df = pd.read_html(str(table),header=0)
pr_df = pr_df[0]
pr_df = pr_df.loc[:14].append(pr_df.loc[17:]) #removes annoying "league average" stuff
for index in range(0,len(spotrac_ids)): #replaces spotrac ids with FanGraphs IDs
pr_df['Team'].loc[pr_df['Team'] == spotrac_ids[index]] = fangraphs_ids[index]
df = df.merge(pr_df, on='Team')
#grabs pitching WAR and Wins to determine team luckiness
pitch_url = ('https://www.fangraphs.com/leaders.aspx?pos=all&stats=pit&lg=all&qual=0&type=c,59,4&season=%s&month=0&season1=%s&ind=0&team=0,ts&rost=0&age=0&filter=&players=0' % (str(year-1),str(year-1)))
pitch_page = requests.get(pitch_url)
pitch_soup = bs(pitch_page.content, 'html.parser')
table = pitch_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="LeaderBoard1_dg1_ctl00")
pit_df = pd.read_html(str(table),header=1)
pit_df = pit_df[0]
pit_df = pit_df.iloc[:-1]
df = df.merge(pit_df, on='Team')
#grabs park factors for adjusting HR rate
pf_url = ('https://www.fangraphs.com/guts.aspx?type=pf&teamid=0&season=%s' % (str(year-1)))
pf_page = requests.get(pf_url)
pf_soup = bs(pf_page.content,'html.parser')
table = pf_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="GutsBoard1_dg1_ctl00")
pf_df = pd.read_html(str(table),header=0)
pf_df = pf_df[0]
df = df.merge(pf_df, on='Team')
#calculates NERD
df['Bat'] = | pd.to_numeric(df['Bat'], downcast="float") | pandas.to_numeric |
import os, glob
import pandas as pd
import numpy as np
import torch
from itertools import compress
def imorphics_info():
df = pd.read_excel(os.getenv("HOME") + '/Dropbox/Z_DL/OAIDataBase/oai_active_contour_parameters.xlsx')
df['subject_id'] = df['subject_id'].values.astype(np.int32)
return df
def moaks00():
return pd.read_csv(os.getenv("HOME") + '/Dropbox/Z_DL/OAIDataBase/moaks00.csv')
def locate_lateral_and_medial(slices, pick=False):
""" Select certain slices by the z-locations"""
dess_info = oai_dess_info()
for i in list(slices.keys()):
loc = dess_info.loc[dess_info['ID'] == i]
if pick == 'lateral':
slices[i] = [x for x in slices[i] if int(x.split('_')[1]) in list(range(loc['lateral0'].values[0], loc['lateral1'].values[0] + 1))]
if pick == 'medial':
slices[i] = [x for x in slices[i] if int(x.split('_')[1]) in list(range(loc['medial0'].values[0], loc['medial1'].values[0] + 1))]
return slices
def slice_location_normalized(dess_info, id, scheme=2):
""" slices location information"""
i = int(id.split('_')[0])
s = int(id.split('_')[1])
side, s_0, s_1, lateral0, lateral1, medial0, medial1 \
= dess_info.loc[dess_info['ID'] == i,
['side', 's_0', 's_1', 'lateral0', 'lateral1', 'medial0', 'medial1']].values[0]
if scheme == 1:
if side == 2:
return (s - medial0) / (lateral1 - medial0)
elif side == 1:
return (s - medial1) / (lateral0 - medial1)
# lateral---notch---medial
# 0 1 2 3
elif scheme == 2:
if side == 2:
if s >= lateral0:
return (s - s_1) / (lateral0 - s_1)
elif s >= medial1:
return (s - lateral0) / (medial1 - lateral0) + 1
#elif s >= s_0:
# return (s - medial1) / (s_0 - medial1) + 2
else:
return (s - medial1) / (s_0 - medial1) + 2
elif side == 1:
if s <= lateral1:
return (s - s_0) / (lateral1 - s_0)
elif s <= medial0:
return (s - lateral1) / (medial0 - lateral1) + 1
#elif s <= s_1:
# return (s - medial0) / (s_1 - medial0) + 2
else:
return (s - medial0) / (s_1 - medial0) + 2
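# Scheme 2 returns a normalized coordinate in [0, 3]: 0-1 covers the lateral compartment,
# 1-2 the notch region, and 2-3 the medial compartment (see the sketch above); scheme 1
# collapses the whole medial-to-lateral span into a single [0, 1] interval.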
def get_ids_by_subjects(dir_img, method):
neighbors = dict()
slices = dict()
dess_info = imorphics_info()
"""Returns a list of the ids in the directory"""
if method == 'automatic':
ids = list(set([f.split('/')[-1].split('.')[0] for f in glob.glob(dir_img+'*')]))
ids = [tuple(int(y) for y in (x.split('_'))) for x in ids]
ids = sorted(ids, key=lambda element: [e for e in element])
slices_all = dict()
for x in ids:
if x[0] not in slices_all:
slices_all[x[0]] = list()
slices_all[x[0]].append('_'.join([str(y) for y in x]))
subjects = list(slices_all.keys())
subjects.sort()
for s in subjects:
slices[s] = slices_all[s]
if method == 'manual':
subjects = list(dess_info['ID'])
for s in subjects:
slice_range = list(range(dess_info.loc[dess_info['ID'] == s, 's_0'].values[0],
dess_info.loc[dess_info['ID'] == s, 's_1'].values[0] + 4, 4))
slices[s] = [str(s) + '_' + str(y) for y in slice_range]
for x in slices[s]:
neighbors[x] = [x]
return subjects, slices
def imorphics_dess_info():
dess_info = oai_dess_info()
subjects = [(dess_info.iloc[i]['subject_id'], dess_info.iloc[i]['tags'] + '_imorphics') for i in
range(len(dess_info['ID']))]
crop = {'subjects': subjects,
'cropx0': dess_info['cropx0'], 'cropx1': dess_info['cropx1'],
'cropy0': dess_info['cropy0'], 'cropy1': dess_info['cropy1']}
crop = pd.DataFrame(crop)
zrange = {'subjects': subjects, 's_0': dess_info['s_0'], 's_1': dess_info['s_1']}
zrange = pd.DataFrame(zrange)
return subjects, crop, zrange
def oai_mri_info(dir_img, dir_tag, crop_file):
subjects = glob.glob(dir_img + dir_tag + '/*')
subjects.sort()
subjects = [(int(x.split(dir_img + dir_tag + '/')[1].split('.')[0]), dir_tag) for x in subjects]
if crop_file is not None:
crop_file['subjects'] = subjects
crop = crop_file[['subjects', 'cropx0', 'cropx1', 'cropy0', 'cropy1']]
zrange = crop_file[['subjects', 's_0', 's_1']]
else:
crop = None
zrange = None
return subjects, crop, zrange
def dess_crop_files(x):
df = pd.read_csv(x, header=None)
df.columns = ['ID', 's_0', 's_1', 'cropx0', 'cropx1', 'cropy0', 'cropy1']
df['ID'] = df['ID'].astype(np.int32)
return df
def get_oai_XR(subjects, tags):
""" labels"""
df = pd.read_csv('/home/ghc/Dropbox/Z_DL/scripts/OAI_stats/collected/XR.csv')
y = dict()
y['subjects'] = subjects
for tag in tags:
label = []
for x in subjects:
try:
label.append(df.loc[(df['ID'] == x[0]) & (df['SIDE'] == 2), tag].values[0])
except:
label.append(np.nan)
y[tag] = np.array(label)
labels = | pd.DataFrame(y) | pandas.DataFrame |
from ...cost.metrics_datadog import DatadogManager, DatadogAssistant, DataNotFoundForHostInDdg, HostNotFoundInDdg, DatadogCached
import pandas as pd
import pytest
@pytest.fixture
def datadog_assistant(mocker):
def factory(series=[], host_list=[]):
mockreturn = lambda *args, **kwargs: {'series': series, 'status': 'ok'}
mockee = 'datadog.api.Metric.query'
mocker.patch(mockee, side_effect=mockreturn)
mockreturn = lambda *args, **kwargs: {'total_returned': len(host_list), 'host_list': host_list}
mockee = 'datadog.api.Hosts.search'
mocker.patch(mockee, side_effect=mockreturn)
# set start/end
import datetime as dt
from datetime import timedelta
dt_now = dt.datetime.now()
dt_1w = dt_now - timedelta(days=7)
dda = DatadogAssistant(dt_1w, dt_now, host_id='i-123456')
return dda
return factory
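# The fixture above follows the "factory as fixture" pattern: it returns an inner factory
# instead of a finished object, so each test can pick its own canned Datadog responses
# (series / host_list) before DatadogAssistant is built. A generic sketch of the pattern
# (names below are purely illustrative, not part of this code base):
#
#   @pytest.fixture
#   def make_client(mocker):
#       def factory(canned_response):
#           mocker.patch('pkg.api.call', side_effect=lambda *a, **k: canned_response)
#           return Client()
#       return factory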
class TestDatadogAssistant:
def test_init(self, datadog_assistant):
dda = datadog_assistant([])
assert True
def test_getMetricsCore_noData(self, datadog_assistant):
dda = datadog_assistant(series=[])
with pytest.raises(DataNotFoundForHostInDdg):
df = dda._get_metrics_core(None, 'Average')
def test_getMetricsCore_ok(self, datadog_assistant):
dda = datadog_assistant(series=[{'pointlist': [{'ts_int': 1234567, 'Average': 2}]}])
df = dda._get_metrics_core(None, 'Average')
assert df.shape[0]==1
def test_getMeta_noData(self, datadog_assistant):
dda = datadog_assistant(host_list=[])
with pytest.raises(HostNotFoundInDdg):
df = dda._get_meta()
def test_getMeta_ok(self, datadog_assistant):
dda = datadog_assistant(host_list=[{'meta': {'gohai': '{"memory": {"total": "10kB"}}', 'cpuCores': 2}, 'name': 'i-123456'}])
res = dda._get_meta()
assert res=={'cpuCores': 2, 'memory_total': 10240}
def test_getMetricsXxxYyy(self, datadog_assistant):
pointlist = [{'ts_int': 1234567, 'cpu_idle_min': 2.5, 'cpu_idle_avg': 2.5, 'ram_free_min': 10, 'ram_free_avg': 5}]
dda = datadog_assistant(
series=[{'pointlist': pointlist}],
host_list=[{'meta': {'gohai': '{"memory": {"total": "10kB"}}', 'cpuCores': 2}, 'name': 'i-123456'}],
)
actual = dda.get_metrics_cpu_max()
assert actual.shape[0]==1
assert actual.shape[1]==3 # columns: cpu_idle_min ts_dt cpu_used_max
actual = dda.get_metrics_cpu_avg()
assert actual.shape[0]==1
assert actual.shape[1]==3
actual = dda.get_metrics_ram_max()
assert actual.shape[0]==1
assert actual.shape[1]==3
actual = dda.get_metrics_ram_avg()
assert actual.shape[0]==1
assert actual.shape[1]==3
@pytest.fixture
def datadog_manager(mocker):
def factory():
mockreturn = lambda *args, **kwargs: None
mockee = 'datadog.initialize'
mocker.patch(mockee, side_effect=mockreturn)
ddm = DatadogManager()
return ddm
return factory
@pytest.fixture
def ddgenv_set(monkeypatch):
monkeypatch.setenv('DATADOG_API_KEY', 'abcdef')
monkeypatch.setenv('DATADOG_APP_KEY', 'abcdef')
@pytest.fixture
def ddgenv_missing(monkeypatch):
  # raising=False avoids a KeyError when the variable is not set in the test environment
  monkeypatch.delenv('DATADOG_API_KEY', raising=False)
  monkeypatch.delenv('DATADOG_APP_KEY', raising=False)
class TestDatadogManager:
def test_init(self, datadog_manager):
ddm = datadog_manager()
assert True # no exception
def test_isConfigured_true(self, datadog_manager, ddgenv_set):
ddm = datadog_manager()
assert ddm.is_configured()
def test_isConfigured_false(self, datadog_manager, ddgenv_missing):
ddm = datadog_manager()
assert not ddm.is_configured()
def test_getMetricsAll(self, datadog_manager, mocker):
def mymock(mr, me):
mockreturn = lambda *args, **kwargs: mr
mockee = 'isitfit.cost.metrics_datadog.DatadogAssistant.%s'%me
mocker.patch(mockee, side_effect=mockreturn)
import datetime as dt
dtnow = dt.datetime.now()
mockees = [
( pd.DataFrame({'ts_int':[1], 'ts_dt':[dtnow], 'cpu_used_max': [3]}),
'get_metrics_cpu_max'
),
( pd.DataFrame({'ts_int':[1], 'ts_dt':[dtnow], 'cpu_used_avg': [3]}),
'get_metrics_cpu_avg'
),
( pd.DataFrame({'ts_int':[1], 'ts_dt':[dtnow], 'cpu_used_min': [3]}),
'get_metrics_cpu_min'
),
( pd.DataFrame({'ts_int':[1], 'ts_dt':[dtnow], 'ram_used_max': [3]}),
'get_metrics_ram_max'
),
( pd.DataFrame({'ts_int':[1], 'ts_dt':[dtnow], 'ram_used_avg': [3]}),
'get_metrics_ram_avg'
),
( pd.DataFrame({'ts_int':[1], 'ts_dt':[dtnow], 'ram_used_min': [3]}),
'get_metrics_ram_min'
),
( pd.DataFrame({'ts_int':[1], 'ts_dt':[dtnow], 'nhours': [3]}),
'get_metrics_count'
),
]
for mr, me in mockees: mymock(mr, me)
ddm = datadog_manager()
actual = ddm.get_metrics_all('i-123456')
assert actual.shape[0]==1
assert actual.shape[1]==8 # columns: ts_dt cpu_used_max cpu_used_avg cpu_used_min ram_used_max ram_used_avg ram_used_min nhours
@pytest.fixture
def cache_man(mocker):
"""
Mocked cache manager
"""
class MockCacheMan:
def __init__(self):
self._map = {}
self.ready = False
def isReady(self): return self.ready
def get(self, key): return self._map.get(key)
def set(self, key, val): self._map[key] = val
cache_man = MockCacheMan()
mocker.spy(cache_man, 'get')
mocker.spy(cache_man, 'set')
return cache_man
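# Note: mocker.spy wraps the real get/set methods of MockCacheMan, so their behaviour is
# unchanged while call_count can be asserted in the tests below; e.g. cache_man.set('k', 1)
# leaves cache_man._map == {'k': 1} and cache_man.set.call_count == 1.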
class TestDatadogCachedGetMetricsDerived:
def test_notReady_noData(self, mocker, cache_man):
# mock parent
# mockreturn = lambda *args, **kwargs: pd.DataFrame()
def mockreturn(*args, **kwargs): raise DataNotFoundForHostInDdg
mockee = 'isitfit.cost.metrics_datadog.DatadogManager.get_metrics_all'
mocker.patch(mockee, side_effect=mockreturn)
ddc = DatadogCached(cache_man)
# after first call
with pytest.raises(DataNotFoundForHostInDdg):
actual = ddc.get_metrics_derived(None, 'i-123456', None)
assert cache_man.get.call_count == 0
assert cache_man.set.call_count == 0
assert cache_man._map == {}
# after 2nd call
with pytest.raises(DataNotFoundForHostInDdg):
actual = ddc.get_metrics_derived(None, 'i-123456', None)
assert cache_man.get.call_count == 0
assert cache_man.set.call_count == 0
assert cache_man._map == {}
def test_notReady_yesData(self, mocker, cache_man):
# mock parent
mockreturn = lambda *args, **kwargs: pd.DataFrame({'a': [1,2,3]})
mockee = 'isitfit.cost.metrics_datadog.DatadogManager.get_metrics_all'
mocker.patch(mockee, side_effect=mockreturn)
ddc = DatadogCached(cache_man)
actual = ddc.get_metrics_derived(None, 'i-123456', None)
assert actual is not None
assert actual.shape[0]==3
assert cache_man.get.call_count == 0
assert cache_man.set.call_count == 0
assert cache_man._map == {}
def test_yesReady_noData(self, mocker, cache_man):
# mark as ready
cache_man.ready = True
# mock parent
# mockreturn = lambda *args, **kwargs: pd.DataFrame()
def mockreturn(*args, **kwargs): raise DataNotFoundForHostInDdg
mockee = 'isitfit.cost.metrics_datadog.DatadogManager.get_metrics_all'
mocker.patch(mockee, side_effect=mockreturn)
ddc = DatadogCached(cache_man)
host_id = 'i-123456'
# after first call
with pytest.raises(DataNotFoundForHostInDdg):
actual = ddc.get_metrics_derived(None, host_id, None)
assert cache_man.get.call_count == 1 # checks cache and doesn't find key
assert cache_man.set.call_count == 1 # first set key
assert callable(cache_man._map[ddc.get_key(host_id)])
# after 2nd call
with pytest.raises(DataNotFoundForHostInDdg):
actual = ddc.get_metrics_derived(None, host_id, None)
assert cache_man.get.call_count == 2 # incremented
assert cache_man.set.call_count == 1 # no increment
assert callable(cache_man._map[ddc.get_key(host_id)])
def test_yesReady_invalidCache(self, mocker, cache_man):
# enable mocked cache
cache_man.ready = True
# set key in cache
host_id = 'i-123456'
ddc = DatadogCached(cache_man)
cache_man._map[ddc.get_key(host_id)] = pd.DataFrame()
# fetch will raise
with pytest.raises(Exception):
actual = ddc.get_metrics_derived(None, host_id, None)
def test_yesReady_yesData(self, mocker, cache_man):
# mark as ready
cache_man.ready = True
# mock parent
mockreturn = lambda *args, **kwargs: | pd.DataFrame({'a': [1,2,3]}) | pandas.DataFrame |
from datetime import datetime
import json
from collections import OrderedDict
import os.path
from math import log
from math import e
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from math import sqrt
import pandas as pd
data = OrderedDict()
weights = []
lat = []
long = []
num_points = 500
if os.path.isfile('maps.json'):
with open ('maps.json', 'r+') as fp:
data = json.load(fp, object_pairs_hook=OrderedDict)
for key in range(len(list(data.keys()))):
if key > num_points:
break
stored = data[list(data.keys())[len(list(data.keys())) - 1 - key]]
month = stored['Month']
day = stored['Day']
year = stored['Year']
lat.append(stored['Latitude'])
long.append(stored['Longitude'])
date = month + " " + str(day) + ", " + str(year)
date_format = "%B %d, %Y"
now = datetime.now()
date_object = datetime.strptime(date, date_format)
delta = now - date_object
num_hours = delta.days*24
if num_hours != 0:
weights.append(sqrt(1.0/num_hours) * 1000)
else:
weights.append(25)
weights = np.array(weights)
weights = weights.reshape(-1, 1)
min_max_scaler = MinMaxScaler(feature_range=(0, 2))
weights = min_max_scaler.fit_transform(np.float32(weights))
weights = weights.tolist()
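# Worked numbers for the recency weighting above (illustrative): a report 30 days old gives
# num_hours = 720 and sqrt(1/720) * 1000 ~= 37.3, while a report 1 day old gives
# sqrt(1/24) * 1000 ~= 204.1; MinMaxScaler then rescales the whole batch of weights into
# the range [0, 2], so newer reports end up with larger weights.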
points = OrderedDict()
long_shit = []
lat_shit = []
weight_shit = []
for i in range(num_points):
long_shit.append(long[i])
lat_shit.append(lat[i])
weight_shit.append(weights[i][0])
df = | pd.DataFrame() | pandas.DataFrame |
# general
import math
import logging
import json
import os,sys
from pIMZ.regions import SpectraRegion
import random
from collections import defaultdict, Counter
import glob
import shutil, io, base64
# general package
from natsort import natsorted
import pandas as pd
import numpy as np
from numpy.ctypeslib import ndpointer
from pyimzml.ImzMLParser import ImzMLParser, browse, getionimage
import ms_peak_picker
import regex as re
# image
import skimage
from skimage import measure as sk_measure
from adjustText import adjust_text
# processing
import ctypes
import subprocess
import dill as pickle
#vis
import dabest
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
#methods
import umap
import hdbscan
import diffxpy.api as de
import anndata
from scipy import ndimage, misc, sparse
from scipy.sparse.linalg import spsolve
from scipy.spatial.distance import squareform, pdist
import scipy.cluster as spc
import scipy as sp
import sklearn as sk
from sklearn.metrics.pairwise import cosine_similarity
#web/html
import jinja2
# applications
import progressbar
class CombinedSpectra():
"""CombinedSpectra class for a combined analysis of several spectra regions.
"""
def __setlogger(self):
"""Sets up logging facilities for CombinedSpectra.
"""
self.logger = logging.getLogger('CombinedSpectra')
if len(self.logger.handlers) == 0:
self.logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
self.logger.addHandler(consoleHandler)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
consoleHandler.setFormatter(formatter)
def __init__(self, regions):
"""Initializes a CombinedSpectra object with the following attributes:
- logger (logging.Logger): Reference to the Logger object.
        - regions (dict): A dictionary that has SpectraRegion object names as keys and the respective SpectraRegion objects as values. If a SpectraRegion object does not have a name attribute, it will be named according to the region id.
- consensus_similarity_matrix (pandas.DataFrame): Pairwise similarity matrix between consensus spectra of all combinations of regions. Initialized with None.
- region_cluster2cluster (dict): A dictionary where every tuple (region name, region id) is mapped to its cluster id where it belongs. Initialized with None.
- region_array_scaled (dict): A dictionary where each SpectraRegion name is mapped to the respective scaled region array either using "avg" (average) or "median" method. Initialized with an empty dict.
- de_results_all (dict): Methods mapped to their differential analysis results (as pd.DataFrame). Initialized with an empty defaultdict.
Args:
regions (dict): A dictionary that maps region ids to respective SpectraRegion objects.
"""
self.regions = {}
self.consensus_similarity_matrix = None
self.region_cluster2cluster = None
self.region_array_scaled = {}
self.de_results_all = defaultdict(lambda: dict())
self.df_results_all = defaultdict(lambda: dict())
self.logger = None
self.__setlogger()
for x in regions:
addregion = regions[x]
if addregion.name == None:
addregion.name = x
self.regions[addregion.name] = regions[x]
def __get_spectra_similarity(self, vA, vB):
"""Calculates cosine similarity between two vectors of the same length.
Args:
vA (numpy.array/list): First vector.
vB (numpy.array/list): Second vector.
Returns:
float: cosine similarity.
"""
if (np.all((vA == 0)) or np.all((vB == 0))):
return 0
return np.dot(vA, vB) / (np.sqrt(np.dot(vA,vA)) * np.sqrt(np.dot(vB,vB)))
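    # Worked example for the cosine similarity above (illustrative vectors):
    # vA = [1, 0, 1], vB = [1, 1, 0] -> dot = 1 and both norms are sqrt(2), so the
    # similarity is 0.5; identical spectra give 1.0 and the all-zero guard returns 0.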
def consensus_similarity(self):
"""
Calculates consensus_similarity_matrix of CombinedSpectra object.
The resulting pandas.DataFrame is a pairwise similarity matrix between consensus spectra of all combinations of regions.
If the object was not yet scaled, it will get scaled.
"""
self.check_scaled()
allConsSpectra = {}
for regionName in self.region_array_scaled:
scaled_region = self.region_array_scaled[regionName]
region = self.regions[regionName]
regionCS = region.consensus_spectra(array=scaled_region, set_consensus=False)
for clusterid in regionCS:
allConsSpectra[(regionName, clusterid)] = regionCS[clusterid]
allRegionClusters = sorted([x for x in allConsSpectra])
distDF = pd.DataFrame(0.0, index=allRegionClusters, columns=allRegionClusters)
for i in range(0, len(allRegionClusters)):
regionI = allRegionClusters[i]
for j in range(i, len(allRegionClusters)):
regionJ = allRegionClusters[j]
specSim = self.__get_spectra_similarity(allConsSpectra[regionI], allConsSpectra[regionJ])
distDF[regionI][regionJ] = specSim
distDF[regionJ][regionI] = specSim
self.consensus_similarity_matrix = distDF
def plot_consensus_similarity(self):
"""Plots the similarity matrix represented as seaborn.heatmap.
"""
sns.heatmap(self.consensus_similarity_matrix, xticklabels=1, yticklabels=1)
plt.show()
plt.close()
def cluster_concensus_spectra(self, number_of_clusters=5):
"""Performs clustering using Ward variance minimization algorithm on similarity matrix of consensus spectra and updates region_cluster2cluster with the results. region_cluster2cluster dictionary maps every tuple (region name, region id) to its cluster id where it belongs. Additionally plots the resulting dendrogram depicting relationships of regions to each other.
Args:
number_of_clusters (int, optional): Number of desired clusters. Defaults to 5.
"""
df = self.consensus_similarity_matrix.copy()
# Calculate the distance between each sample
Z = spc.hierarchy.linkage(df.values, 'ward')
plt.figure(figsize=(8,8))
# Make the dendro
spc.hierarchy.dendrogram(Z, labels=df.columns.values, leaf_rotation=0, orientation="left", color_threshold=240, above_threshold_color='grey')
c = spc.hierarchy.fcluster(Z, t=number_of_clusters, criterion='maxclust')
lbl2cluster = {}
region2cluster = {}
for lbl, clus in zip(df.columns.values, c):
lbl2cluster[str(lbl)] = clus
region2cluster[lbl] = clus
# Create a color palette with 3 color for the 3 cyl possibilities
my_palette = plt.cm.get_cmap("viridis", number_of_clusters)
# Apply the right color to each label
ax = plt.gca()
xlbls = ax.get_ymajorticklabels()
for lbl in xlbls:
val=lbl2cluster[lbl.get_text()]
#print(lbl.get_text() + " " + str(val))
lbl.set_color(my_palette(val-1))
plt.show()
plt.close()
self.region_cluster2cluster = region2cluster
def check_scaled(self):
"""Detects not scaled region arrays and norms them using "median" method.
"""
hasToReprocess = False
for regionName in self.regions:
if not regionName in self.region_array_scaled:
hasToReprocess = True
break
if hasToReprocess:
self.logger.info("Calculating internormed regions")
self.get_internormed_regions()
def mass_intensity(self, masses, regions=None, scaled=False, verbose=True):
"""Plots seaborn.boxplot for every selected region depicting the range of intensity values in each cluster.
Args:
masses (float/list/tuple/set): Desired mass(es).
            regions (list/numpy.array, optional): Regions in which to look for mass intensities. Defaults to None, meaning all available regions are considered.
scaled (bool, optional): Whether to use intensity values of scaled region arrays. Defaults to False.
verbose (bool, optional): Whether to add information to the logger. Defaults to True.
"""
if not isinstance(masses, (list, tuple, set)):
masses = [masses]
if scaled:
self.check_scaled()
for regionName in self.regions:
if not regions is None and not regionName in regions:
continue
cregion = self.regions[regionName]
cluster2coords = cregion.getCoordsForSegmented()
if not scaled:
dataArray = cregion.region_array
else:
dataArray = self.region_array_scaled[regionName]
for mass in masses:
bestExMassForMass, bestExMassIdx = cregion._get_exmass_for_mass(mass)
if verbose:
self.logger.info("Processing Mass {} with best existing mass {}".format(mass, bestExMassForMass))
clusterIntensities = defaultdict(list)
for clusterid in cluster2coords:
for coord in cluster2coords[clusterid]:
intValue = dataArray[coord[0], coord[1], bestExMassIdx]
clusterIntensities[clusterid].append(intValue)
clusterVec = []
intensityVec = []
massVec = []
specIdxVec = []
for x in clusterIntensities:
elems = clusterIntensities[x]
specIdxVec += [i for i in range(0, len(elems))]
clusterVec += ["Cluster " + str(x)] * len(elems)
intensityVec += elems
massVec += [mass] * len(elems)
dfObj = pd.DataFrame({"mass": massVec, "specidx": specIdxVec, "cluster": clusterVec, "intensity": intensityVec})
sns.boxplot(data=dfObj, x="cluster", y="intensity")
plt.xticks(rotation=90)
plt.title("Intensities for Region {} ({}m/z)".format(regionName, mass))
plt.show()
plt.close()
def mass_heatmap(self, masses, log=False, min_cut_off=None, plot=True, scaled=False, verbose=True, title="{mz}"):
"""Plots heatmap for every selected region depicting region_array spectra reduced to the sum of the specified masses.
Args:
masses (float/list/tuple/set): Desired mass(es).
log (bool, optional): Whether to take logarithm of the output matrix. Defaults to False.
min_cut_off (int/float, optional): Lower limit of values in the output matrix. Smaller values will be replaced with min_cut_off. Defaults to None.
plot (bool, optional): Whether to plot the output matrix. Defaults to True.
scaled (bool, optional): Whether to use intensity values of scaled region arrays. Defaults to False.
verbose (bool, optional): Whether to add information to the logger. Defaults to True.
title (str, optional): Format string defining the plot's title.
Returns:
numpy.array: A matrix of the last region where each element is a sum of intensities at given masses.
"""
if not isinstance(masses, (list, tuple, set)):
masses = [masses]
if scaled:
self.check_scaled()
region2segments = {}
for regionName in self.regions:
cregion = self.regions[regionName]
if scaled == False:
dataArray = self.regions[regionName].region_array
else:
dataArray = self.region_array_scaled[regionName]
image = np.zeros((dataArray.shape[0], dataArray.shape[1]))
for mass in masses:
bestExMassForMass, bestExMassIdx = cregion._get_exmass_for_mass(mass)
if verbose:
self.logger.info("Processing Mass {} with best existing mass {}".format(mass, bestExMassForMass))
for i in range(dataArray.shape[0]):
for j in range(dataArray.shape[1]):
image[i,j] += dataArray[i,j,bestExMassIdx]
if log:
image = np.log(image)
if min_cut_off != None:
image[image <= min_cut_off] = min_cut_off
region2segments[regionName] = image
if plot:
rows = math.ceil(len(self.regions) / 2)
fig, axes = plt.subplots(rows, 2)
if len(axes.shape) > 1:
axes = np.reshape(axes, (1, axes.shape[0] * axes.shape[1]))[0][:]
allMin, allMax = 0,0
for regionName in region2segments:
allMin = min(allMin, np.min(region2segments[regionName]))
allMax = max(allMax, np.max(region2segments[regionName]))
didx = 0
for didx, regionName in enumerate(region2segments):
ax = axes[didx]
heatmap = ax.matshow(region2segments[regionName], vmin=allMin, vmax=allMax)
# We must be sure to specify the ticks matching our target names
ax.set_title(regionName, color="w", y=0.1)
for ddidx in range(didx+1, rows*2):
ax = axes[ddidx]
ax.axis('off')
#fig.colorbar(heatmap, ax=axes[-1])
plt.colorbar(heatmap, ax=axes[:], spacing='proportional')
plt.suptitle(title.format(mz=";".join([str(round(x, 3)) if not type(x) in [str] else x for x in masses])))
plt.show()
plt.close()
return image
def plot_segments(self, highlight=None):
"""Plots segmented arrays of all regions as heatmaps.
Args:
highlight (list/tuple/set/int, optional): If cluster ids are specified here, the resulting clustering will have cluster id 2 for highlight clusters, cluster id 0 for background, and cluster id 1 for the rest. Defaults to None.
"""
assert(not self.region_cluster2cluster is None)
allClusters = [self.region_cluster2cluster[x] for x in self.region_cluster2cluster]
valid_vals = sorted(set(allClusters))
region2segments = {}
for regionName in self.regions:
origSegments = np.array(self.regions[regionName].segmented, copy=True)
region2segments[regionName] = origSegments
if highlight != None:
if not isinstance(highlight, (list, tuple, set)):
highlight = [highlight]
for regionName in region2segments:
showcopy = np.copy(region2segments[regionName])
for i in range(0, showcopy.shape[0]):
for j in range(0, showcopy.shape[1]):
if showcopy[i,j] != 0:
if showcopy[i,j] in highlight:
showcopy[i,j] = 2
elif showcopy[i,j] != 0:
showcopy[i,j] = 1
region2segments[regionName] = showcopy
self._plot_arrays(region2segments)
def plot_common_segments(self, highlight=None):
"""Plots segmented arrays of every region annotating the clusters with respect to new clustering done with CombinedSpectra (saved in region_cluster2cluster).
Args:
highlight (list/tuple/set/int, optional): If cluster ids are specified here, the resulting clustering will have cluster id 2 for highlight clusters, cluster id 0 for background, and cluster id 1 for the rest. Defaults to None.
"""
assert(not self.region_cluster2cluster is None)
allClusters = [self.region_cluster2cluster[x] for x in self.region_cluster2cluster]
valid_vals = sorted(set(allClusters))
region2segments = {}
for regionName in self.regions:
origSegments = np.array(self.regions[regionName].segmented, copy=True)
origCluster2New = {}
for x in self.region_cluster2cluster:
if x[0] == regionName:
origCluster2New[x[1]] = self.region_cluster2cluster[x]
newSegments = np.zeros(origSegments.shape)
print(origCluster2New)
for i in range(0, newSegments.shape[0]):
for j in range(0, newSegments.shape[1]):
newSegments[i,j] = origCluster2New.get(origSegments[i,j], 0)
region2segments[regionName] = newSegments
if highlight != None:
if not isinstance(highlight, (list, tuple, set)):
highlight = [highlight]
for regionName in region2segments:
showcopy = np.copy(region2segments[regionName])
for i in range(0, showcopy.shape[0]):
for j in range(0, showcopy.shape[1]):
if showcopy[i,j] != 0:
if showcopy[i,j] in highlight:
showcopy[i,j] = 2
elif showcopy[i,j] != 0:
showcopy[i,j] = 1
region2segments[regionName] = showcopy
self._plot_arrays(region2segments)
def _plot_arrays(self, region2segments):
"""Plots heatmaps for every region given in region2segments.
Args:
region2segments (dict): A dictionary with region names as keys and respective segmented arrays as values.
"""
rows = math.ceil(len(region2segments) / 2)
fig, axes = plt.subplots(rows, 2)
valid_vals = set()
for regionName in region2segments:
plotarray = region2segments[regionName]
valid_vals = valid_vals.union(list(np.unique(plotarray)))
valid_vals = sorted(valid_vals)
min_ = min(valid_vals)
max_ = max(valid_vals)
positions = np.linspace(min_, max_, len(valid_vals))
val_lookup = dict(zip(positions, valid_vals))
print(val_lookup)
def formatter_func(x, pos):
'The two args are the value and tick position'
val = val_lookup[x]
return val
if len(axes.shape) > 1:
axes = np.reshape(axes, (1, axes.shape[0] * axes.shape[1]))[0][:]
didx=0
for didx, regionName in enumerate(region2segments):
ax = axes[didx]
im = ax.matshow(region2segments[regionName], cmap=plt.cm.get_cmap('viridis', len(valid_vals)), vmin=min_, vmax=max_)
formatter = plt.FuncFormatter(formatter_func)
# We must be sure to specify the ticks matching our target names
ax.set_title(regionName, color="w", y=0.9, x=0.1)
for ddidx in range(didx+1, rows*2):
ax = axes[ddidx]
ax.axis('off')
plt.colorbar(im, ax=axes[:], ticks=positions, format=formatter, spacing='proportional')
plt.show()
plt.close()
def __make_de_res_key(self, region0, clusters0, region1, clusters1):
"""Generates the storage key for two sets of clusters.
Args:
region0 (int): first region id.
clusters0 (list): list of cluster ids 1.
region1 (int): second region id.
clusters1 (list): list of cluster ids 2.
Returns:
tuple: tuple (region0, sorted clusters0, region1, sorted clusters1)
"""
return (region0, tuple(sorted(clusters0)), region1, tuple(sorted(clusters1)))
def to_region_cluster_input(self, region_cluster_list):
rcl0 = defaultdict(list)
for x in region_cluster_list:
rcl0[x[0]].append(x[1])
rcl0 = [(x, tuple(sorted(rcl0[x]))) for x in rcl0]
return rcl0
def find_markers(self, region_cluster_list0, region_cluster_list1, protWeights, mz_dist=3, mz_best=False, use_methods = ["empire", "ttest", "rank"], count_scale={"ttest": 1, "rank": 1}, scaled=True, sample_max=-1):
"""Performs differential analysis to finds marker proteins for specific regions and clusters.
Args:
region_cluster_list0 (list/numpy.array): A list of tuples (region id, list of clusters) that will be used as the 0 conditional vector by differential analysis.
region_cluster_list1 (list/numpy.array): A list of tuples (region id, list of clusters) that will be used as the 1 conditional vector by differential analysis.
protWeights (ProteinWeights): ProteinWeights object for translation of masses to protein names.
mz_dist (float/int, optional): Allowed offset for protein lookup of needed masses. Defaults to 3.
            mz_best (bool, optional): Whether to consider only the closest found protein within mz_dist (with the least absolute mass difference). Defaults to False.
use_methods (str/list, optional): Test method(s) for differential expression. Defaults to ["empire", "ttest", "rank"].\n
- "empire": Empirical and Replicate based statistics (EmpiRe).\n
- "ttest": Welch’s t-test for differential expression using diffxpy.api.\n
- "rank": Mann-Whitney rank test (Wilcoxon rank-sum test) for differential expression using diffxpy.api.\n
count_scale (dict, optional): Count scales for different methods (relevant for empire, which can only use integer counts). Defaults to {"ttest": 1, "rank": 1}.
            scaled (bool, optional): Whether each processed region is normalized. Those which are not will be scaled with the median method. Defaults to True.
            sample_max (int, optional): Maximum number of samples (spectra of the specified regions & clusters) that will be used by the differential analysis (picked at random if more are available). Defaults to -1, meaning all samples are used.
Returns:
tuple: Tuple (collections.defaultdict, pandas.core.frame.DataFrame, pandas.core.frame.DataFrame). Dictionary with test method mapped to each tuple (region, clusters) and respective results. Two further data frames with expression data and test design.
"""
if type(region_cluster_list0) in (list, tuple):
region_cluster_list0 = self.to_region_cluster_input(region_cluster_list0)
if type(region_cluster_list1) in (list, tuple):
region_cluster_list1 = self.to_region_cluster_input(region_cluster_list1)
for pair in region_cluster_list0:
assert(pair[0] in self.regions)
assert([x for x in self.regions[region_cluster_list0[0][0]].idx2mass] == [x for x in self.regions[pair[0]].idx2mass])
for pair in region_cluster_list1:
assert(pair[0] in self.regions)
assert([x for x in self.regions[region_cluster_list1[0][0]].idx2mass] == [x for x in self.regions[pair[0]].idx2mass])
cluster2coords0 = {}
for pair in region_cluster_list0:
cluster2coords0[pair[0]] = self.regions[pair[0]].getCoordsForSegmented()
assert(all([x in cluster2coords0[pair[0]] for x in pair[1]]))
cluster2coords1 = {}
for pair in region_cluster_list1:
cluster2coords1[pair[0]] = self.regions[pair[0]].getCoordsForSegmented()
assert(all([x in cluster2coords1[pair[0]] for x in pair[1]]))
resKey = self.__make_de_res_key(region_cluster_list0[0][0], region_cluster_list0[0][1], region_cluster_list1[0][0], region_cluster_list1[0][1])
if scaled:
self.check_scaled()
if self.de_results_all is None:
self.de_results_all = defaultdict(lambda: dict())
sampleVec = []
conditionVec = []
exprData = | pd.DataFrame() | pandas.DataFrame |
#%%
import numpy as np
import pandas as pd
import cmdstanpy
import arviz as az
# Load the datasets
calib_data = | pd.read_csv('../../data/calibration/2021-04-05_hplc_calibration/processed/2021-04-05_NC_DM_calibration_relative_areas.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Get working directory
os.getcwd()
os.chdir('/Users/pablobottero/github/master/python/data_analysis')
os.getcwd()
rentals_2011= | pd.read_csv('washington_bike_rentals_2011.csv', sep=';', decimal=',') | pandas.read_csv |
"""
This module includes two types of discrete state-space formulations for biogas plants.
The anaerobic digestion model in FlexibleBiogasPlantModel is based on the work in
https://doi.org/10.1016/j.energy.2017.12.073 and ISBN: 978-3-319-16192-1
The module is designed to work with fledge: https://doi.org/10.5281/zenodo.3715873
The code is organized and implemented based on the flexible building model cobmo: https://zenodo.org/record/3523539
"""
import numpy as np
import pandas as pd
import scipy.linalg
import os
import inspect
import sys
import datetime as dt
import pyomo.environ as pyo
import bipmo.utils
class BiogasPlantModel(object):
"""
BiogasPlantModel represents all attributes and functions that all biogas plants have in common. It is the basis for
every model that inherits from it. Caution: It does not work as a standalone model!
"""
model_type: str = None
der_name: str = 'Biogas Plant'
plant_scenarios: pd.DataFrame
states: pd.Index
controls: pd.Index
outputs: pd.Index
switches: pd.Index
chp_schedule: pd.DataFrame
disturbances: pd.Index
state_vector_initial: pd.Series
state_matrix: pd.DataFrame
control_matrix: pd.DataFrame
disturbance_matrix: pd.DataFrame
state_output_matrix: pd.DataFrame
control_output_matrix: pd.DataFrame
disturbance_output_matrix: pd.DataFrame
timestep_start: pd.Timestamp
timestep_end: pd.Timestamp
timestep_interval: pd.Timedelta
timesteps: pd.Index
disturbance_timeseries: pd.DataFrame
output_maximum_timeseries: pd.DataFrame
output_minimum_timeseries: pd.DataFrame
marginal_cost: float
lhv_table: pd.DataFrame
temp_in: float
cp_water: float
feedstock_limit_type: str
available_feedstock: float
def __init__(
self,
scenario_name: str,
timestep_start=None,
timestep_end=None,
timestep_interval=None,
connect_electric_grid=True,
):
# Scenario name.
self.scenario_name = scenario_name
# Define the biogas plant model (change paths accordingly).
base_path = os.path.dirname(os.path.dirname(os.path.normpath(__file__)))
# Load the scenario.
self.plant_scenarios = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_scenario.csv')
)
self.plant_scenarios = self.plant_scenarios[
self.plant_scenarios['scenario_name'] == self.scenario_name]
self.plant_scenarios.index = pd.Index([self.scenario_name])
# Load marginal cost
self.marginal_cost = self.plant_scenarios.loc[
self.scenario_name, 'marginal_cost_EUR_Wh-1']
# Load feedstock data used in the scenario.
self.plant_feedstock = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_feedstock.csv')
)
self.plant_feedstock = self.plant_feedstock[
self.plant_feedstock['feedstock_type']
== self.plant_scenarios.loc[self.scenario_name, 'feedstock_type']
]
self.plant_feedstock.index = pd.Index([self.scenario_name])
self.feedstock_limit_type = self.plant_scenarios.loc[
self.scenario_name, 'availability_limit_type']
self.available_feedstock = self.plant_scenarios.loc[
self.scenario_name, 'availability_substrate_ton_per_year']
# Load CHP data used in the scenario.
self.CHP_list = self.plant_scenarios.CHP_name[self.scenario_name].split()
self.number_CHP = len(self.CHP_list)
self.plant_CHP_source = pd.read_csv(
os.path.join(base_path, 'data/biogas_plant_CHP.csv')
)
self.plant_CHP = | pd.DataFrame(columns=self.plant_CHP_source.columns) | pandas.DataFrame |
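# The matrices declared in BiogasPlantModel above are the pieces of a standard discrete
# state-space model. A minimal sketch of how such matrices are combined each timestep
# (1x1 matrices with illustrative numbers, not plant data):
#
#   import numpy as np
#   A, B, E = np.array([[0.9]]), np.array([[0.1]]), np.array([[0.05]])  # state/control/disturbance
#   C = np.array([[1.0]])                                               # state -> output
#   x = np.array([1.0])
#   for u, d in [(0.5, 0.1), (0.0, 0.2)]:
#       y = C @ x                                         # output equation (other terms omitted)
#       x = A @ x + B @ np.array([u]) + E @ np.array([d])  # state update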
import os
import pymongo
import logging
import requests
import pandas as pd
from lxml import etree
from urllib.parse import urlsplit
from six.moves.configparser import ConfigParser
from scrapy.exceptions import CloseSpider
logger = logging.getLogger('custom-logger')
def remove_url_schema(url):
split_url = urlsplit(url)
edited_url = split_url.netloc + split_url.path + \
split_url.query + split_url.fragment
return edited_url
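# Example of the helper above (hypothetical URL): for 'https://example.com/sitemap.xml?page=2',
# urlsplit gives netloc='example.com', path='/sitemap.xml', query='page=2', so the function
# returns 'example.com/sitemap.xmlpage=2'. The '?'/'#' separators are dropped, which is
# acceptable here because the result is only used as a dictionary key.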
def connect_db(settings):
client = pymongo.MongoClient(
settings['MONGODB_SERVER'],
int(settings['MONGODB_PORT'])
)
try:
client.server_info()
except pymongo.errors.ServerSelectionTimeoutError as e:
raise CloseSpider('Unable to connect to MongoDB')
logger.info("Connected to MongoDB")
return client
def drop_db(settings):
client = pymongo.MongoClient(
settings['MONGODB_SERVER'],
int(settings['MONGODB_PORT'])
)
try:
client.server_info()
client.drop_database(settings['MONGODB_DB'])
client.close()
except pymongo.errors.ServerSelectionTimeoutError as e:
raise CloseSpider('Unable to connect to MongoDB')
def get_sitemap_url():
config_file = "config/settings.ini"
parser = ConfigParser()
parser.read(config_file)
for section_name in parser.sections():
if section_name == "Sitemaps":
urls = list()
for name, value in parser.items(section_name):
urls.append(value)
logger.info("Sitemap URL - %s", value)
return urls
raise CloseSpider('Sitemap URL not provided')
def parse_sitemap(sitemap_urls):
urls = dict()
for url in sitemap_urls:
response = requests.get(url)
sitemap_xml = etree.fromstring(response.content)
for urlset in sitemap_xml:
children = urlset.getchildren()
edited_url = remove_url_schema(children[0].text)
urls[edited_url] = 0
return urls
def generate_report(stats):
filepath = 'log/scrapy_stats.csv'
stats['scraping time'] = stats['finish_time'] - stats['start_time']
df = pd.DataFrame(list(stats.items()), columns=['parameter', 'value'])
df.set_index('parameter', inplace=True)
if not os.path.isfile(filepath):
df.to_csv(filepath)
else:
cdf = pd.read_csv(filepath, index_col='parameter')
concat_df = | pd.concat([df, cdf], axis=1, sort=False, join='inner') | pandas.concat |
# !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Test the projection module
"""
import unittest
import numpy as np
import pandas as pd
from numpy.polynomial import legendre
from pyrotor.projection import trajectory_to_coef
from pyrotor.projection import trajectories_to_coefs
from pyrotor.projection import compute_weighted_coef
from pyrotor.projection import coef_to_trajectory
def test_trajectory_to_coef():
# Test Legendre
y = pd.DataFrame({"A": [1, 2, 3, 4, 5],
"B": [-4, -1, 4, 11, 20]})
basis_dimension = {"A": 3, "B": 2}
basis_features = basis_dimension
basis = "legendre"
expected_coef = np.array([3., 2., 0., 6., 12.], dtype='float64')
result = trajectory_to_coef(y, basis, basis_features, basis_dimension)
np.testing.assert_almost_equal(expected_coef, result)
# Test B-spline
x = np.linspace(0, 1, 20)
y = pd.DataFrame({"A": x,
"B": x**2})
basis_features = {"knots": [.25, .5, .75], "A": 2, "B": 3}
basis_dimension = {"A": 6, "B": 7}
basis = "bspline"
expected_coef = np.array([0., .125, .375, .625, .875, 1.,
0., 0., 4.16666667e-02, 2.29166667e-01, 5.41666667e-01, 8.33333333e-01, 1.], dtype='float64')
result = trajectory_to_coef(y, basis, basis_features, basis_dimension)
np.testing.assert_almost_equal(expected_coef, result)
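# Why the expected Legendre coefficients above are [3., 2., 0.]: the column A = [1, 2, 3, 4, 5]
# is the line 1 + 4*t sampled on t in [0, 1], and with the shifted Legendre basis P0 = 1,
# P1 = 2*t - 1 that line is exactly 3*P0 + 2*P1. Quick check using the legendre module
# imported above:
#
#   t = np.linspace(0, 1, 5)
#   np.allclose(legendre.legval(2*t - 1, [3., 2., 0.]), 1 + 4*t)   # -> True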
def test_trajectories_to_coefs():
# Test Legendre
y = [pd.DataFrame({"A": [1, 2, 3, 4, 5]}),
pd.DataFrame({"A": [-4, -1, 4, 11, 20]})]
basis_dimension = {"A": 2}
basis_features = basis_dimension
basis = "legendre"
expected_coefs_traj_1 = np.array([3., 2.])
expected_coefs_traj_2 = np.array([6., 12.])
n_jobs = None
result = trajectories_to_coefs(y, basis, basis_features, basis_dimension, n_jobs)
result_1 = result[0]
result_2 = result[1]
np.testing.assert_almost_equal(result_1, expected_coefs_traj_1)
np.testing.assert_almost_equal(result_2, expected_coefs_traj_2)
# Test B-spline
x = np.linspace(0, 1, 20)
y = [pd.DataFrame({"A": x}),
pd.DataFrame({"A": 2 * x})]
basis_features = {"knots": [.25, .5, .75], "A": 2}
basis_dimension = {"A": 6}
basis = "bspline"
expected_coefs_traj_1 = np.array([0., .125, .375, .625, .875, 1.])
expected_coefs_traj_2 = np.array([0., .25, .75, 1.25, 1.75, 2.])
n_jobs = None
result = trajectories_to_coefs(y, basis, basis_features, basis_dimension, n_jobs)
result_1 = result[0]
result_2 = result[1]
np.testing.assert_almost_equal(result_1, expected_coefs_traj_1)
np.testing.assert_almost_equal(result_2, expected_coefs_traj_2)
def test_compute_weighted_coef():
coef1 = pd.Series([1, 2, 0], dtype='float64')
coef2 = | pd.Series([-2, 0, 1], dtype='float64') | pandas.Series |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
from utils import animation_to_gif
pio.templates.default = "simple_white"
FEAT_NAMES = {"waterfront": "Waterfront", "view": "View", "condition": "Condition", "grade": "Grade",
"price": "Price",
"sqft_living": "Interior Living Space (sqft)",
"sqft_above": "Interior Housing Space Above Ground Level (sqft)",
"sqft_lot": "Land Space (sqft)",
"yr_built": "Year of Initial Built",
"sqft_living15": "Interior Living Space for the Nearest 15 Neighbors (sqft)",
"sqft_lot15": "Land Space of the Nearest 15 Neighbors (sqft)",
"bathrooms": "Bathrooms", "floors": "Floors",
"sqft_basement": "Interior Housing Space Below Ground Level (sqft)",
"yr_renovated": "Year of last renovation"}
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename).dropna().drop_duplicates()
    # eliminate irrelevant columns: "home id", "date of home sale", "longitude" and "latitude"
irrelevant_feats = {"id", "lat", "long", "date"}
for feat in irrelevant_feats:
df = df.drop(feat, 1)
# check feats in a specific range
range_feats = {"waterfront": [0, 1], "view": range(5), "condition": range(1, 6), "grade": range(1, 14)}
for feat in range_feats:
df = df[df[feat].isin(range_feats[feat])]
# check positive / non-negative feats
positive_feats = {"price", "sqft_living", "sqft_lot", "sqft_above", "yr_built", "sqft_living15", "sqft_lot15"}
non_negative_feats = {"bathrooms", "floors", "sqft_basement", "yr_renovated"}
for feat in positive_feats:
df = df[df[feat] > 0]
for feat in non_negative_feats:
df = df[df[feat] >= 0]
# zipcode manipulation
df = pd.get_dummies(df, prefix='zipcode_', columns=['zipcode'])
df.insert(0, 'intercept', 1, True)
return df.drop("price", 1), df.price
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
for feat in X:
if feat == "zipcode":
pearson_cor = np.cov(X[feat], y) / (np.std(X[feat]) * np.std(y))
name = feat
if feat in FEAT_NAMES:
name = FEAT_NAMES[feat]
fig = px.scatter( | pd.DataFrame({'x': X[feat], 'y': y}) | pandas.DataFrame |
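# Two details worth flagging in feature_evaluation above (suggested alternative, not part of
# the original code): np.cov(X[feat], y) returns a 2x2 covariance matrix, so the Pearson
# correlation is its off-diagonal entry divided by the two standard deviations; an
# equivalent one-liner is
#   pearson_cor = np.corrcoef(X[feat], y)[0, 1]
# Also, after pd.get_dummies the columns are prefixed 'zipcode__', so the
# 'if feat == "zipcode"' guard never matches and the correlation is presumably meant to be
# computed for every feature.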
"""
This script contains a function for converting a text file into a csv file.
Unlike the conversion from csv to text, text to csv does not require as many functionalities.
"""
from argparse import ArgumentParser
import os.path as osp
import sys
import pandas as pd
import numpy as np
def _parse_args():
"""
Parses arguments for the main script.
"""
parser = ArgumentParser(description='utility script for converting a midi text file into a csv file',
prog='python text_to_csv.py')
parser.add_argument(
'-t',
'--ticks',
help="Ticks per each time step (default: 25). "
"You should use the same value that you used to convert the original csv into the text.",
type=int,
default=25
)
parser.add_argument(
'-v',
'--velocity',
help="Velocity value for notes (default: 80).",
type=int,
default=80
)
parser.add_argument(
'-e',
'--end-track',
help="Add an 'End_track' item after the last note. "
"Use it if you are not using the original midi text "
"(e.g. if you're using a machine-generated text).",
action='store_true',
default=False
)
parser.add_argument(
'-f',
'--filter-out',
help="This value will be used for filtering out short sounds. "
"If set to 0, then no filtering will be done. (default: 0)",
type=int,
default=0
)
parser.add_argument(
'-m',
'--merge',
help="This value will be used for merging nearby notes. "
"If set to 0, then no merging will be done. (default: 0)",
type=int,
default=0
)
parser.add_argument(
'--merge-first',
help="Merge notes and then filter.",
action='store_true',
default=False
)
parser.add_argument(
'--verbose',
help="make the process more verbose.",
action='store_true',
default=False
)
parser.add_argument(
'text',
type=str,
help="File path for the text file to convert."
)
parser.add_argument(
'csv',
nargs='?',
type=str,
help="File path for the resulting csv file (Optional). By default, the csv file will be "
"generated in the same directory as the source text file."
)
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
if not args.ticks > 0:
parser.error("The value for ticks per time step must be at least 1.")
if not args.velocity >= 0:
parser.error("The value for ticks per time step must be non-negative.")
if not osp.isfile(args.text):
parser.error("The input text file does not exist. Please, check the file path and try again.")
if args.csv and not osp.isdir(osp.dirname(args.csv)):
parser.error("The result path does not exist. Please, use an existing directory.")
if args.filter_out < 0:
parser.error("The value for filter_out must be a non-negative integer.")
if args.merge < 0:
parser.error("The value for merge must be a non-negative integer.")
return args
def text_to_midicsv(midi_text, ticks_per_step=25, vel=80, add_end_track=False, filter_interval=0, merge_interval=0, merge_first=False, verbose=False):
"""
Converts the given midi text file into a pandas dataframe.
Then, the resulting dataframe is returned.
When constructing the dataframe, it uses ticks_per_step to determine the time tick value of each note.
All 'Note_on's will have the same velocity specified as a parameter.
If add_end_track is True, it will add an 'End_track' 1920 ticks after the last item from the text.
Otherwise, it will consider the last item in the text as the 'End_track'.
    If filter_interval or merge_interval is non-zero, it will filter out notes no longer than filter_interval time steps and merge notes that are at most merge_interval time steps apart.
    If verbose is True, it will report the progress as it processes the data.
"""
NUMBER_OF_PITCH = 128
COL_NAMES = ['Track', 'Time', 'Type', 'Val1', 'Val2', 'Val3']
HEADER = [0, 0, 'Header', 1, 1, 480]
START_TRACK = [1, 0, 'Start_track', np.nan, np.nan, np.nan]
midi_text = ' ' + midi_text
if add_end_track:
midi_text = midi_text + ' 0'
text_list = midi_text.split(' 0')
# Total number of time steps
n_steps = len(text_list)
if verbose:
print("\n* Converting the text into a numpy matrix ...")
note_time_matrix = np.zeros((NUMBER_OF_PITCH, n_steps), dtype=np.uint8)
for time_step in range(n_steps):
note_str = text_list[time_step]
if note_str != '':
for note_num in note_str.strip().split(' '):
note_time_matrix[int(note_num) - 1, time_step] = 1
if merge_first and merge_interval > 0:
if verbose:
print("* Merging nearby notes ...")
for interval in range(1, merge_interval + 1):
pattern = [1] + [0] * interval + [1]
for time_step in range(1, n_steps - interval):
pattern_match_indices = np.equal(note_time_matrix[:, (time_step - 1) : (time_step + interval + 1)], pattern).all(axis=1).nonzero()[0]
if len(pattern_match_indices) > 0:
note_time_matrix[pattern_match_indices, time_step : (time_step + interval)] = 1
if filter_interval > 0:
if verbose:
print("* Filtering short notes ...")
for interval in range(1, filter_interval + 1):
pattern = [1] * interval + [0]
pattern_match_indices = np.equal(note_time_matrix[:, :interval + 1], pattern).all(axis=1).nonzero()[0]
if len(pattern_match_indices) > 0:
note_time_matrix[pattern_match_indices, :interval] = 0
pattern = [0] + [1] * interval + [0]
for time_step in range(1, n_steps - interval):
pattern_match_indices = np.equal(note_time_matrix[:, (time_step - 1) : (time_step + interval + 1)], pattern).all(axis=1).nonzero()[0]
if len(pattern_match_indices) > 0:
note_time_matrix[pattern_match_indices, time_step : (time_step + interval)] = 0
if not merge_first and merge_interval > 0:
if verbose:
print("* Merging nearby notes ...")
for interval in range(1, merge_interval + 1):
pattern = [1] + [0] * interval + [1]
for time_step in range(1, n_steps - interval):
pattern_match_indices = np.equal(note_time_matrix[:, (time_step - 1) : (time_step + interval + 1)], pattern).all(axis=1).nonzero()[0]
if len(pattern_match_indices) > 0:
note_time_matrix[pattern_match_indices, time_step : (time_step + interval)] = 1
if verbose:
print("* Converting the matrix into a pandas dataframe ...")
data_lists = [HEADER, START_TRACK]
if note_time_matrix[:, 0].nonzero()[0].any():
for pitch in note_time_matrix[:, 0].nonzero()[0]:
data_lists.append([1, 0, 'Note_on_c', 0, pitch, vel])
for time_step in range(1, n_steps - 1):
change_occured = note_time_matrix[:, time_step - 1] != note_time_matrix[:, time_step]
for pitch in change_occured.nonzero()[0]:
if note_time_matrix[pitch, time_step] == 1:
velocity = vel
else:
velocity = 0
data_lists.append([1, (time_step * ticks_per_step), 'Note_on_c', 0, pitch, velocity])
if not add_end_track:
data_lists.append([1, ((n_steps - 1) * ticks_per_step), 'End_track'])
else:
change_occured = note_time_matrix[:, n_steps - 2] != note_time_matrix[:, n_steps - 1]
for pitch in change_occured.nonzero()[0]:
            if note_time_matrix[pitch, n_steps - 1] == 1:  # use the final time step, not the stale loop variable
velocity = vel
else:
velocity = 0
data_lists.append([1, ((n_steps - 1) * ticks_per_step), 'Note_on_c', 0, pitch, velocity])
data_lists.append([1, ((n_steps - 1) * ticks_per_step) + 1920, 'End_track'])
data_lists.append([0, 0, 'End_of_file'])
midi_csv = | pd.DataFrame(data=data_lists, columns=COL_NAMES) | pandas.DataFrame |
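# Worked example of the matrix-to-event step above (illustrative values, ticks_per_step=25,
# vel=80): a pitch that is active in time steps 2..4 produces
#   [1, 50, 'Note_on_c', 0, pitch, 80]    # switched on at step 2 -> tick 2*25
#   [1, 125, 'Note_on_c', 0, pitch, 0]    # switched off at step 5 -> tick 5*25
# i.e. note-offs are written as Note_on_c events with velocity 0, the usual MIDI shorthand.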
import pandas as pd
import numpy as np
import datetime
import calendar
from math import e
from brightwind.analyse import plot as plt
# noinspection PyProtectedMember
from brightwind.analyse.analyse import dist_by_dir_sector, dist_12x24, coverage, _convert_df_to_series
from ipywidgets import FloatProgress
from IPython.display import display
from IPython.display import clear_output
import re
import warnings
pd.options.mode.chained_assignment = None
__all__ = ['Shear']
class Shear:
class TimeSeries:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', max_plot_height=None,
maximise_data=False):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for each timestamp
of a wind series.
:param wspds: pandas DataFrame, list of pandas.Series or list of wind speeds to be used for calculating shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param max_plot_height: height to which the wind profile plot is extended.
:type max_plot_height: float
:param maximise_data: If maximise_data is True, calculations will be carried out on all data where two or
                                  more anemometer readings exist for a timestamp. If False, calculations will only be
carried out on timestamps where readings exist for all anemometers.
:type maximise_data: Boolean
:return TimeSeries object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype TimeSeries object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights, maximise_data=True)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law',
max_plot_height=120)
# Get the alpha or roughness values calculated
timeseries_power_law.alpha
timeseries_log_law.roughness
# View plot
timeseries_power_law.plot
timeseries_log_law.plot
# View input anemometer data
timeseries_power_law.wspds
timeseries_log_law.wspds
# View other information
pprint.pprint(timeseries_power_law.info)
pprint.pprint(timeseries_log_law.info)
"""
print('This may take a while...')
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed, maximise_data=maximise_data)
if calc_method == 'power_law':
alpha_c = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_power_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
alpha = pd.Series(alpha_c.iloc[:, 0], name='alpha')
self._alpha = alpha
elif calc_method == 'log_law':
slope_intercept = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_log_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
slope = slope_intercept.iloc[:, 0]
intercept = slope_intercept.iloc[:, 1]
roughness_coefficient = pd.Series(Shear._calc_roughness(slope=slope, intercept=intercept),
name='roughness_coefficient')
self._roughness = roughness_coefficient
clear_output()
avg_plot = Shear.Average(wspds=wspds, heights=heights, calc_method=calc_method,
max_plot_height=max_plot_height)
self.origin = 'TimeSeries'
self.calc_method = calc_method
self.wspds = wspds
self.plot = avg_plot.plot
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, height, shear_to):
""""
Applies shear calculated to a wind speed time series and scales wind speed from one height to
another for each matching timestamp.
:param self: TimeSeries object to use when applying shear to the data.
:type self: TimeSeries object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
            :param height: height at which the above wspds were measured.
            :type height: float
            :param shear_to: height to which wspds should be scaled.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law')
# Scale wind speeds using calculated exponents
timeseries_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
timeseries_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
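    # The two calc_method options above reduce to the standard wind-shear relations
    # (h = height, v = wind speed): power law v2 = v1 * (h2 / h1) ** alpha, and log law
    # v2 = v1 * ln(h2 / z0) / ln(h1 / z0) with roughness length z0, so apply() rescales
    # each timestamp's speed from the measurement height to shear_to using the
    # exponent/roughness fitted for that timestamp.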
class TimeOfDay:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', by_month=True, segment_start_time=7,
segments_per_day=24, plot_type='line'):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for a wind series
binned by time of the day and (optionally by) month, depending on the user's inputs. The alpha/roughness
coefficient values are calculated based on the average wind speeds at each measurement height in each bin.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights..
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param by_month: If True, calculate alpha or roughness coefficient values for each daily segment and month.
If False, average alpha or roughness coefficient values are calculated for each daily
segment across all months.
:type by_month: Boolean
:param segment_start_time: Starting time for first segment.
:type segment_start_time: int
:param segments_per_day: Number of segments into which each 24 period is split. Must be a divisor of 24.
:type segments_per_day: int
:param plot_type: Type of plot to be generated. Options include 'line', 'step' and '12x24'.
:type plot_type: str
:return: TimeOfDay object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype: TimeOfDay object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeofday_power_law = bw.Shear.TimeOfDay(anemometers, heights, daily_segments=2, segment_start_time=7)
timeofday_log_law = bw.Shear.TimeOfDay(anemometers, heights, calc_method='log_law', by_month=False)
# Get alpha or roughness values calculated
timeofday_power_law.alpha
timeofday_log_law.roughness
# View plot
timeofday_power_law.plot
timeofday_log_law.plot
# View input data
timeofday_power_law.wspds
timeofday_log_law.wspds
# View other information
pprint.pprint(timeofday_power_law.info)
pprint.pprint(timeofday_log_law.info)
"""
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
# initialise empty series for later use
start_times = pd.Series([])
time_wspds = pd.Series([])
mean_time_wspds = pd.Series([])
c = pd.Series([])
slope = pd.Series([])
intercept = pd.Series([])
alpha = pd.Series([])
roughness = pd.Series([])
slope_df = pd.DataFrame([])
intercept_df = pd.DataFrame([])
roughness_df = pd.DataFrame([])
alpha_df = pd.DataFrame([])
# time of day shear calculations
interval = int(24 / segments_per_day)
if by_month is False and plot_type == '12x24':
raise ValueError("12x24 plot is only possible when 'by_month=True'")
if not int(segment_start_time) % 1 == 0:
raise ValueError("'segment_start_time' must be an integer between 0 and 24'")
if not (24 % segments_per_day == 0) | (segments_per_day == 1):
raise ValueError("'segments_per_day' must be a divisor of 24'")
segment_start_time = str(segment_start_time)
start_times[0] = datetime.datetime.strptime(segment_start_time, '%H')
dt = datetime.timedelta(hours=interval)
# extract wind speeds for each daily segment
for i in range(1, segments_per_day):
start_times[i] = start_times[i - 1] + dt
# extract wind speeds for each month
months_tot = pd.unique(wspds.index.month.values)
for j in months_tot:
anemometers_df = wspds[wspds.index.month == j]
for i in range(0, segments_per_day):
if segments_per_day == 1:
mean_time_wspds[i] = anemometers_df.mean().dropna()
elif i == segments_per_day - 1:
start_times[i] = start_times[i].strftime("%H:%M:%S")
start = str(start_times[i].time())
end = str(start_times[0].time())
time_wspds[i] = pd.DataFrame(anemometers_df).between_time(start, end, include_end=False)
mean_time_wspds[i] = time_wspds[i][(time_wspds[i] > min_speed).all(axis=1)].mean().dropna()
else:
start_times[i] = start_times[i].strftime("%H:%M:%S")
start = str(start_times[i].time())
end = str(start_times[i + 1].time())
time_wspds[i] = pd.DataFrame(anemometers_df).between_time(start, end, include_end=False)
mean_time_wspds[i] = time_wspds[i][(time_wspds[i] > min_speed).all(axis=1)].mean().dropna()
# calculate shear
if calc_method == 'power_law':
for i in range(0, len(mean_time_wspds)):
alpha[i], c[i] = Shear._calc_power_law(mean_time_wspds[i].values, heights, return_coeff=True)
alpha_df = pd.concat([alpha_df, alpha], axis=1)
if calc_method == 'log_law':
for i in range(0, len(mean_time_wspds)):
slope[i], intercept[i] = Shear._calc_log_law(mean_time_wspds[i].values, heights,
return_coeff=True)
roughness[i] = Shear._calc_roughness(slope=slope[i], intercept=intercept[i])
roughness_df = pd.concat([roughness_df, roughness], axis=1)
slope_df = pd.concat([slope_df, slope], axis=1)
intercept_df = pd.concat([intercept_df, intercept], axis=1)
# error check
if mean_time_wspds.shape[0] == 0:
raise ValueError('None of the input wind speeds are greater than the min_speed, cannot calculate shear')
if calc_method == 'power_law':
alpha_df.index = start_times
alpha_df.index = alpha_df.index.time
alpha_df.sort_index(inplace=True)
if by_month is True:
alpha_df.columns = [calendar.month_abbr[month] for month in months_tot]
self.plot = plt.plot_shear_time_of_day(Shear._fill_df_12x24(alpha_df), calc_method=calc_method,
plot_type=plot_type)
else:
n_months = len(alpha_df.columns.values)
alpha_df = pd.DataFrame(alpha_df.mean(axis=1))
alpha_df.columns = [str(n_months) + ' Month Average']
df_in = pd.DataFrame((Shear._fill_df_12x24(alpha_df)).iloc[:, 0])
df_in.columns = [str(n_months) + ' Month Average']
self.plot = plt.plot_shear_time_of_day(df_in, calc_method=calc_method, plot_type=plot_type)
alpha_df.index.name = 'segment_start_time'
self._alpha = alpha_df
if calc_method == 'log_law':
roughness_df.index = slope_df.index = intercept_df.index = start_times
roughness_df.index = slope_df.index = intercept_df.index = roughness_df.index.time
roughness_df.sort_index(inplace=True)
slope_df.sort_index(inplace=True)
intercept_df.sort_index(inplace=True)
if by_month is True:
roughness_df.columns = slope_df.columns = intercept_df.columns = \
[calendar.month_abbr[month] for month in months_tot]
self.plot = plt.plot_shear_time_of_day(Shear._fill_df_12x24(roughness_df),
calc_method=calc_method, plot_type=plot_type)
else:
n_months = len(slope_df.columns.values)
slope_df = pd.DataFrame(slope_df.mean(axis=1))
intercept_df = pd.DataFrame(intercept_df.mean(axis=1))
roughness_df = pd.DataFrame(roughness_df.mean(axis=1))
roughness_df.columns = slope_df.columns = intercept_df.columns = \
[str(len(months_tot)) + '_month_average']
df_in = pd.DataFrame(Shear._fill_df_12x24(roughness_df).iloc[:, 0])
df_in.columns = [str(n_months) + ' Month Average']
self.plot = plt.plot_shear_time_of_day(df_in, calc_method=calc_method, plot_type=plot_type)
roughness_df.index.name = 'segment_start_time'
self._roughness = roughness_df
self.calc_method = calc_method
self.wspds = wspds
self.origin = 'TimeOfDay'
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed,
segment_start_time=segment_start_time, segments_per_day=segments_per_day)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, height, shear_to):
"""
Applies shear calculated to a wind speed time series by time of day (and optionally by month) to scale
wind speed from one height to another.
:param self: TimeOfDay object to use when applying shear to the data.
:type self: TimeOfDay object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param height: Height above ground at which wspds were measured.
:type height: float
:param shear_to: height to which wspds should be scaled.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
data = bw.load_csv('C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
timeofday_power_law = bw.Shear.TimeOfDay(anemometers, heights)
timeofday_log_law = bw.Shear.TimeOfDay(anemometers, heights, calc_method='log_law')
# Scale wind speeds using calculated exponents
timeofday_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
timeofday_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
class Average:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', plot_both=False, max_plot_height=None):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, based on the
average wind speeds of each supplied time series.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param max_plot_height: height to which the wind profile plot is extended.
:type max_plot_height: float
:return: Average object containing calculated alpha/roughness coefficient values, a plot and other data.
:rtype: Average object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv('C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
average_power_law = bw.Shear.Average(anemometers, heights)
average_log_law = bw.Shear.Average(anemometers, heights, calc_method='log_law', max_plot_height=120)
# Get the alpha or roughness values calculated
average_power_law.alpha
average_log_law.roughness
# View plot
average_power_law.plot
average_log_law.plot
# View input data
average_power_law.wspds
average_log_law.wspds
# View other information
pprint.pprint(average_power_law.info)
pprint.pprint(average_log_law.info)
"""
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
mean_wspds = wspds.mean(axis=0)
if mean_wspds.shape[0] == 0:
raise ValueError('None of the input wind speeds are greater than the min_speed, cannot calculate shear')
if calc_method == 'power_law':
alpha, c = Shear._calc_power_law(mean_wspds.values, heights, return_coeff=True)
if plot_both is True:
slope, intercept = Shear._calc_log_law(mean_wspds.values, heights, return_coeff=True)
self.plot = plt.plot_power_law(plot_both=True, avg_alpha=alpha, avg_c=c, avg_slope=slope,
avg_intercept=intercept,
wspds=mean_wspds.values, heights=heights,
max_plot_height=max_plot_height)
else:
self.plot = plt.plot_power_law(alpha, c, mean_wspds.values, heights,
max_plot_height=max_plot_height)
self._alpha = alpha
elif calc_method == 'log_law':
slope, intercept = Shear._calc_log_law(mean_wspds.values, heights, return_coeff=True)
roughness = Shear._calc_roughness(slope=slope, intercept=intercept)
self._roughness = roughness
if plot_both is True:
alpha, c = Shear._calc_power_law(mean_wspds.values, heights, return_coeff=True)
self.plot = plt.plot_power_law(avg_alpha=alpha, avg_c=c, avg_slope=slope, avg_intercept=intercept,
wspds=mean_wspds.values, heights=heights,
max_plot_height=max_plot_height)
else:
self.plot = plt.plot_log_law(slope, intercept, mean_wspds.values, heights,
max_plot_height=max_plot_height)
else:
raise ValueError("Please enter a valid calculation method, either 'power_law' or 'log_law'.")
self.wspds = wspds
self.origin = 'Average'
self.calc_method = calc_method
self.info = Shear._create_info(self, heights=heights, min_speed=min_speed, cvg=cvg)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, height, shear_to):
"""
Applies average shear calculated to a wind speed time series to scale wind speed from one height to another.
:param self: Average object to use when applying shear to the data.
:type self: Average object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param height: Height above ground at which wspds were measured.
:type height: float
:param shear_to: Height to which wspds should be scaled.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
data = bw.load_csv('C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
average_power_law = bw.Shear.Average(anemometers, heights)
average_log_law = bw.Shear.Average(anemometers, heights, calc_method='log_law', max_plot_height=120)
# Scale wind speeds using exponents
average_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
average_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
class BySector:
def __init__(self, wspds, heights, wdir, min_speed=3, calc_method='power_law', sectors=12,
direction_bin_array=None, direction_bin_labels=None):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for a wind series
binned by direction. The alpha/roughness coefficient values are calculated based on the average wind speeds
at each measurement height in each bin.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights
:type heights: list
:param wdir: Wind direction measurements
:type wdir: pandas.DataFrame or Series
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
:type min_speed: float
:param calc_method: Method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param sectors: Number of sectors for the shear to be calculated for.
:type sectors: int
:param direction_bin_array: Specific array of directional bins to be used. If None, bins are calculated
by 360/sectors.
:type direction_bin_array: list or array
:param direction_bin_labels: Labels to be given to the above direction_bin array.
:type direction_bin_labels: list or array
:return: BySector object containing calculated alpha/roughness coefficient values, a plot and other data.
:rtype: BySector object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv('C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
directions = data['Dir78mS']
# Calculate shear exponents using default bins ([345,15,45,75,105,135,165,195,225,255,285,315,345])
by_sector_power_law= bw.Shear.BySector(anemometers, heights, directions)
by_sector_log_law= bw.Shear.BySector(anemometers, heights, directions, calc_method='log_law')
# Calculate shear exponents using custom bins
custom_bins = [0,30,60,90,120,150,180,210,240,270,300,330,360]
by_sector_power_law_custom_bins = bw.Shear.BySector(anemometers, heights, directions,
direction_bin_array=custom_bins)
# Get alpha or roughness values calculated
by_sector_power_law.alpha
by_sector_log_law.roughness
# View plot
by_sector_power_law.plot
by_sector_log_law.plot
# View input data
by_sector_power_law.wspds
by_sector_log_law.wspds
# View other information
pprint.pprint(by_sector_power_law.info)
pprint.pprint(by_sector_log_law.info)
"""
print('This may take a while...')
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
if direction_bin_array is not None:
sectors = len(direction_bin_array) - 1
wdir = _convert_df_to_series(wdir)
mean_wspds = pd.Series([])
mean_wspds_df = pd.DataFrame([])
count_df = pd.DataFrame([])
count = pd.Series([])
for i in range(len(wspds.columns)):
w = wspds.iloc[:, i]
plot, mean_wspds[i] = dist_by_dir_sector(w, wdir, direction_bin_array=direction_bin_array,
sectors=sectors,
aggregation_method='mean', return_data=True)
plot, count[i] = dist_by_dir_sector(w, wdir, direction_bin_array=direction_bin_array,
sectors=sectors,
aggregation_method='count', return_data=True)
if i == 0:
mean_wspds_df = mean_wspds[i].copy()
count_df = count[i].copy()
else:
mean_wspds_df = pd.concat([mean_wspds_df, mean_wspds[i]], axis=1)
count_df = pd.concat([count_df, count[i]], axis=1)
count_df = count_df.mean(axis=1)
wind_rose_plot, wind_rose_dist = dist_by_dir_sector(wspds.iloc[:, 0], wdir,
direction_bin_array=direction_bin_array,
sectors=sectors,
direction_bin_labels=direction_bin_labels,
return_data=True)
if calc_method == 'power_law':
alpha = mean_wspds_df.apply(Shear._calc_power_law, heights=heights, return_coeff=False, axis=1)
wind_rose_plot, wind_rose_dist = dist_by_dir_sector(wspds.iloc[:, 0], wdir,
direction_bin_array=direction_bin_array,
sectors=sectors,
direction_bin_labels=direction_bin_labels,
return_data=True)
self.alpha_count = count_df
self._alpha = pd.Series(alpha, name='alpha')
clear_output()
self.plot = plt.plot_shear_by_sector(scale_variable=alpha, wind_rose_data=wind_rose_dist,
calc_method=calc_method)
elif calc_method == 'log_law':
slope_intercept = mean_wspds_df.apply(Shear._calc_log_law, heights=heights, return_coeff=True, axis=1)
slope = slope_intercept.iloc[:, 0]
intercept = slope_intercept.iloc[:, 1]
roughness = Shear._calc_roughness(slope=slope, intercept=intercept)
self.roughness_count = count_df
self._roughness = pd.Series(roughness, name='roughness_coefficient')
clear_output()
self.plot = plt.plot_shear_by_sector(scale_variable=roughness, wind_rose_data=wind_rose_dist,
calc_method=calc_method)
else:
raise ValueError("Please enter a valid calculation method, either 'power_law' or 'log_law'.")
self.wspds = wspds
self.wdir = wdir
self.origin = 'BySector'
self.sectors = sectors
self.calc_method = calc_method
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed,
direction_bin_array=direction_bin_array)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, wdir, height, shear_to):
"""
Applies shear calculated to a wind speed time series by wind direction to scale
wind speed from one height to another.
:param self: BySector object to use when applying shear to the data
:type self: BySector object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param wdir: Wind direction measurements of wspds, only required if shear is to be applied by direction
sector.
:type wdir: pandas.Series
:param height: Height of wspds.
:type height: float
:param shear_to: Height to which wspds should be scaled.
:type shear_to: float
:return: A pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
data = bw.load_csv('C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS']]
heights = [80, 60]
directions = data[['Dir78mS']]
# Calculate shear exponents
by_sector_power_law = bw.Shear.BySector(anemometers, heights, directions)
by_sector_log_law = bw.Shear.BySector(anemometers, heights, directions, calc_method='log_law')
# Calculate shear exponents using default bins ([345,15,45,75,105,135,165,195,225,255,285,315,345])
by_sector_power_law= bw.Shear.BySector(anemometers, heights, directions)
# Scale wind speeds using exponents
by_sector_power_law.apply(data['Spd40mN'], data['Dir38mS'], height=40, shear_to=70)
by_sector_log_law.apply(data['Spd40mN'], data['Dir38mS'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds=wspds, height=height, shear_to=shear_to, wdir=wdir)
@staticmethod
def _log_roughness_scale(wspds, height, shear_to, roughness):
"""
Scale wind speeds using the logarithmic wind shear law.
:param wspds: wind speeds at height z1, U1
:param height: z1
:param shear_to: z2
:param roughness: z0
:return: Scaled wind speeds, U2
:rtype: pandas.Series or float
METHODOLOGY:
U2 = (ln(z2/z0)/ln(z1/z0))U1
Where:
- U2 is the wind speed at height z2
- U1 is the wind speed at height z1
- z1 is the lower height
- z2 is the upper height
- zo is the roughness coefficient
"""
scale_factor = np.log(shear_to / roughness) / (np.log(height / roughness))
scaled_wspds = wspds * scale_factor
return scaled_wspds
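# Worked example of the formula above (illustrative values only, not part of the original
# module): with height z1 = 40 m, shear_to z2 = 80 m and roughness z0 = 0.03 m,
# scale_factor = ln(80 / 0.03) / ln(40 / 0.03) ≈ 7.889 / 7.195 ≈ 1.096,
# so a 5.0 m/s measurement at 40 m scales to roughly 5.48 m/s at 80 m.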
@staticmethod
def _calc_log_law(wspds, heights, return_coeff=False, maximise_data=False) -> (np.array, float):
"""
Derive the best fit logarithmic law line from a given time-step of speed data at 2 or more elevations
:param wspds: List of wind speeds [m/s]
:param heights: List of heights [m above ground]. The position of the height in the list must be the same
position in the list as its corresponding wind speed value.
:return: The slope and intercept of the best fit line, as defined above
:rtype: pandas.Series and float
METHODOLOGY:
Derive natural log of elevation data sets
Derive coefficients of linear best fit along ln(heights)- wspds distribution
Characterise new distribution of speed values based on linear best fit
Return the slope and the intercept of this linear best fit.
The slope and intercept can then be used to find the corresponding roughness coefficient, using the
equivalent laws:
1) $U(z) = (v/k)*ln(z/zo)$
which can be rewritten as:
$U(z) = (v/k)*ln(z) - (v/k)ln(zo)$
where zo = e ** (-c/m) of this line
Where:
- U(z) is the wind speed at height z
- v is the friction velocity at the location
- k is the von Kármán constant, taken as 0.4
- z is the height
- zo is the roughness coefficient
2) $U2 = (ln(z2/z0)/ln(z1/z0))U1$
"""
if maximise_data:
log_heights = np.log(
pd.Series(heights).drop(wspds[wspds == 0].index.values.astype(int))) # take log of elevations
wspds = wspds.drop(wspds[wspds == 0].index.values.astype(int))
else:
log_heights = np.log(heights) # take log of elevations
coeffs = np.polyfit(log_heights, wspds, deg=1)
if return_coeff:
return pd.Series([coeffs[0], coeffs[1]])
return coeffs[0]
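# Illustrative use of the fit above (hypothetical wind speeds): for heights [80, 60, 40] m
# and mean speeds [7.5, 7.1, 6.6] m/s, np.polyfit(np.log(heights), wspds, deg=1) returns the
# (slope, intercept) pair, and _calc_roughness then recovers zo = e ** (-intercept / slope).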
@staticmethod
def _calc_power_law(wspds, heights, return_coeff=False, maximise_data=False) -> (np.array, float):
"""
Derive the best fit power law exponent (as 1/alpha) from a given time-step of speed data at 2 or more elevations
:param wspds: pandas.Series or list of wind speeds [m/s]
:param heights: List of heights [m above ground]. The position of the height in the list must be the same
position in the list as its corresponding wind speed value.
:return: The shear value (alpha), as the inverse exponent of the best fit power law, based on the form:
$(v1/v2) = (z1/z2)^(1/alpha)$
:rtype: pandas.Series and float
METHODOLOGY:
Derive natural log of elevation and speed data sets
Derive coefficients of linear best fit along log-log distribution
Characterise new distribution of speed values based on linear best fit
Derive 'alpha' based on gradient of first and last best fit points (function works for 2 or more points)
Return alpha value
"""
if maximise_data:
log_heights = np.log(pd.Series(heights).drop(wspds[wspds == 0].index.values.astype(int)))
log_wspds = np.log(wspds.drop(wspds[wspds == 0].index.values.astype(int)))
else:
log_heights = np.log(heights) # take log of elevations
log_wspds = np.log(wspds) # take log of speeds
coeffs = np.polyfit(log_heights, log_wspds, deg=1) # get coefficients of linear best fit to log distribution
if return_coeff:
return pd.Series([coeffs[0], np.exp(coeffs[1])])
return coeffs[0]
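# Illustrative use of the fit above (hypothetical wind speeds): for heights [80, 60, 40] m
# and mean speeds [7.5, 7.1, 6.6] m/s, np.polyfit(np.log(heights), np.log(wspds), deg=1)
# returns [alpha, ln(c)], so alpha is the slope and c = exp(intercept), giving v = c * z ** alpha.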
@staticmethod
def _calc_roughness(slope, intercept):
return e**(-intercept/slope)
@staticmethod
def _by_12x24(wspds, heights, min_speed=3, return_data=False, var_name='Shear'):
tab_12x24 = dist_12x24(wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_power_law, heights=heights,
axis=1), return_data=True)[1]
if return_data:
return plt.plot_12x24_contours(tab_12x24, label=(var_name, 'mean')), tab_12x24
return plt.plot_12x24_contours(tab_12x24, label=(var_name, 'mean'))
@staticmethod
def scale(wspd, height, shear_to, alpha=None, roughness=None, calc_method='power_law'):
"""
Scales wind speeds from one height to another given a value of alpha or roughness coefficient (zo)
:param wspd: Wind speed time series to apply shear to.
:type wspd: pandas.Series
:param height: Height above ground at which wspd was measured.
:type height: float
:param shear_to: Height to which wspd should be scaled.
:type shear_to: float
:param alpha: Shear exponent to be used when scaling wind speeds.
:type alpha: float
:param roughness: Roughness coefficient to be used when scaling wind speeds.
:type roughness: float
:param calc_method: calculation method used to scale the wind speed.
Using either: 1) 'power_law' : $(v1/v2) = (z1/z2)^(1/alpha)$
2) 'log_law': $v2 = (ln(z2/z0)/ln(z1/z0))v1$
:type calc_method: string
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series or float
**Example Usage**
::
# Scale wind speeds using exponents
# Specify alpha to use
alpha_value = .2
# Specify roughness coefficient to use
zo = .03
height = 40
shear_to = 80
scaled_by_power_law = bw.Shear.scale(data['Spd40mN'], height, shear_to, alpha=alpha_value)
scaled_by_log_law = bw.Shear.scale(data['Spd40mN'], height, shear_to, roughness=zo, calc_method='log_law')
"""
return Shear._scale(wspds=wspd, height=height, shear_to=shear_to, calc_method=calc_method,
alpha=alpha, roughness=roughness)
@staticmethod
def _scale(wspds, height, shear_to, calc_method='power_law', alpha=None, roughness=None, origin=None):
"""
Private function for execution of scale()
"""
if not isinstance(wspds, pd.Series):
wspds = pd.Series(wspds)
if calc_method == 'power_law':
scale_factor = (shear_to / height) ** alpha
scaled_wspds = wspds * scale_factor
elif calc_method == 'log_law':
if origin == 'TimeSeries':
scaled_wspds = Shear._log_roughness_scale(wspds=wspds, height=height,
shear_to=shear_to, roughness=roughness)
else:
scaled_wspds = wspds.apply(Shear._log_roughness_scale, args=(height, shear_to, roughness))
else:
raise ValueError("Please enter a valid calculation method, either 'power_law' or 'log_law'.")
return scaled_wspds
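# Quick illustration of the power-law branch above (hypothetical values): scaling from
# 40 m to 70 m with alpha = 0.2 gives scale_factor = (70 / 40) ** 0.2 ≈ 1.118, so a
# 6.0 m/s wind speed at 40 m becomes roughly 6.71 m/s at 70 m.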
@staticmethod
def _apply(self, wspds, height, shear_to, wdir=None):
scaled_wspds = pd.Series([])
result = pd.Series([])
if self.origin == 'TimeSeries':
if self.calc_method == 'power_law':
df = pd.concat([wspds, self.alpha], axis=1).dropna()
scaled_wspds = Shear._scale(wspds=df.iloc[:, 0], height=height, shear_to=shear_to,
calc_method='power_law', alpha=df.iloc[:, 1])
else:
df = pd.concat([wspds, self.roughness], axis=1).dropna()
scaled_wspds = Shear._scale(wspds=df.iloc[:, 0], height=height, shear_to=shear_to,
calc_method=self.calc_method,
roughness=self._roughness, origin=self.origin)
result = scaled_wspds.dropna()
if self.origin == 'TimeOfDay':
if self.calc_method == 'power_law':
filled_alpha = Shear._fill_df_12x24(self.alpha)
else:
filled_roughness = Shear._fill_df_12x24(self._roughness)
filled_alpha = filled_roughness
df_wspds = [[None for y in range(12)] for x in range(24)]
f = FloatProgress(min=0, max=24 * 12, description='Calculating', bar_style='success')
display(f)
for i in range(0, 24):
for j in range(0, 12):
if i == 23:
df_wspds[i][j] = wspds[
(wspds.index.time >= filled_alpha.index[i]) & (wspds.index.month == j + 1)]
else:
df_wspds[i][j] = wspds[
(wspds.index.time >= filled_alpha.index[i]) & (wspds.index.time < filled_alpha.index[i + 1])
& (wspds.index.month == j + 1)]
if self.calc_method == 'power_law':
df_wspds[i][j] = Shear._scale(df_wspds[i][j], shear_to=shear_to, height=height,
alpha=filled_alpha.iloc[i, j], calc_method=self.calc_method)
else:
df_wspds[i][j] = Shear._scale(df_wspds[i][j], shear_to=shear_to, height=height,
roughness=filled_roughness.iloc[i, j],
calc_method=self.calc_method)
scaled_wspds = pd.concat([scaled_wspds, df_wspds[i][j]], axis=0)
f.value += 1
result = scaled_wspds.sort_index()
f.close()
if self.origin == 'BySector':
# initialise series for later use
bin_edges = pd.Series([])
by_sector = pd.Series([])
if self.calc_method == 'power_law':
direction_bins = self.alpha
else:
direction_bins = self._roughness
# join wind speeds and directions together in DataFrame
df = pd.concat([wspds, wdir], axis=1)
df.columns = ['Unscaled_Wind_Speeds', 'Wind_Direction']
# get directional bin edges from Shear.by_sector output
for i in range(self.sectors):
bin_edges[i] = float(re.findall(r"[-+]?\d*\.\d+|\d+", direction_bins.index[i])[0])
if i == self.sectors - 1:
bin_edges[i + 1] = -float(re.findall(r"[-+]?\d*\.\d+|\d+", direction_bins.index[i])[1])
for i in range(0, self.sectors):
if bin_edges[i] > bin_edges[i + 1]:
by_sector[i] = df[
(df['Wind_Direction'] >= bin_edges[i]) | (df['Wind_Direction'] < bin_edges[i + 1])]
elif bin_edges[i + 1] == 360:
by_sector[i] = df[(df['Wind_Direction'] >= bin_edges[i])]
else:
by_sector[i] = df[
(df['Wind_Direction'] >= bin_edges[i]) & (df['Wind_Direction'] < bin_edges[i + 1])]
by_sector[i].columns = ['Unscaled_Wind_Speeds', 'Wind_Direction']
if self.calc_method == 'power_law':
scaled_wspds[i] = Shear._scale(wspds=by_sector[i]['Unscaled_Wind_Speeds'], height=height,
shear_to=shear_to,
calc_method=self.calc_method, alpha=self.alpha[i])
elif self.calc_method == 'log_law':
scaled_wspds[i] = Shear._scale(wspds=by_sector[i]['Unscaled_Wind_Speeds'], height=height,
shear_to=shear_to,
calc_method=self.calc_method,
roughness=self._roughness[i])
if i == 0:
result = scaled_wspds[i]
else:
result = pd.concat([result, scaled_wspds[i]], axis=0)
result.sort_index(axis='index', inplace=True)
if self.origin == 'Average':
if wdir is not None:
warnings.warn('Warning: Wind direction will not be accounted for when calculating scaled wind speeds.'
' The shear exponents for this object were not calculated by sector. '
'Check the origin of the object using ".origin". ')
if self.calc_method == 'power_law':
result = Shear._scale(wspds=wspds, height=height, shear_to=shear_to,
calc_method=self.calc_method, alpha=self.alpha)
elif self.calc_method == 'log_law':
result = Shear._scale(wspds=wspds, height=height, shear_to=shear_to,
calc_method=self.calc_method, roughness=self._roughness)
new_name = wspds.name + '_scaled_to_' + str(shear_to) + 'm'
result.rename(new_name, inplace=True)
return result
@staticmethod
def _fill_df_12x24(data):
"""
Fills a pandas.DataFrame or Series to be a 12 month x 24 hour pandas.DataFrame by duplicating entries.
Used for plotting TimeOfDay shear.
:param data: pandas.DataFrame or Series to be turned into a 12x24 dataframe
:type data: pandas.Series or pandas.DataFrame.
:return: 12x24 pandas.DataFrame
"""
# create copy for later use
df_copy = data.copy()
interval = int(24 / len(data))
# set index for new data frame to deal with less than 24 sectors
idx = | pd.date_range('2017-01-01 00:00', '2017-01-01 23:00', freq='1H') | pandas.date_range |
# -*- coding: utf-8 -*-
import pandas as pd, numpy as np
from matplotlib import pyplot as plt
import os
import seaborn as sns
import matplotlib.colors as mcolors
def plot_metrics(out_fld, selected_ids, pp_colors, labels, metrics, all_runs_mae, figsize=(8, 5), suffix='',
ylim_top=0.1, disaggregate=False):
plt.rcParams.update({'font.size': 16})
all_runs_mae_mean = dict()
for set_name in all_runs_mae:
all_runs_mae_mean[set_name] = np.mean(all_runs_mae[set_name], axis=2)
n_runs = all_runs_mae_mean[set_name].shape[0]
palette = [pp_colors[i] for i in selected_ids]
series_1 = pd.Series()
series_2 = pd.Series()
series_3 = pd.Series()
series_4 = pd.Series()
for i, pp in enumerate(labels[selected_ids]):
for set_name in all_runs_mae_mean:
series_1 = series_1.append(pd.Series(all_runs_mae_mean[set_name][:, selected_ids[i]]), ignore_index=True)
series_2 = series_2.append(pd.Series([set_name] * n_runs), ignore_index=True)
series_3 = series_3.append(pd.Series(np.arange(n_runs)), ignore_index=True)
series_4 = series_4.append(pd.Series([pp.replace('(amazon)', '[3]').replace('(naver)', '[2]')] * n_runs),
ignore_index=True)
display_df = pd.DataFrame({'absolute error': series_1,
'set': series_2,
'run': series_3,
'Performance Predictor': series_4})
plt.figure(figsize=figsize);
# plt.title('Absolute Error of Performance Predictors')
sns.boxplot(x="set", y="absolute error", hue="Performance Predictor",
data=display_df, palette=palette, fliersize=0)
plt.ylim(bottom=-0.001); # , ylim_top);
plt.legend(); # bbox_to_anchor=(1.4, 1.05));
plt.tight_layout()
plt.savefig(os.path.join(out_fld, 'abs_error' + suffix + '.png'), bbox_inches='tight')
plt.close()
for name, all_runs_metric in metrics:
plt.figure(figsize=(12, 8));
for i, lab in enumerate(labels):
if i in selected_ids:
metric_all = []
for set_name in all_runs_metric:
metric_all.append(all_runs_metric[set_name][:, i])
y = np.mean(metric_all, axis=1)
lo = np.quantile(metric_all, 0.1, axis=1)
hi = np.quantile(metric_all, 0.9, axis=1)
plt.plot(y, label=labels[i].replace('(amazon)', '[3]').replace('(naver)', '[2]'), color=pp_colors[i],
marker='o', linewidth=2, markersize=8);
plt.fill_between(np.arange(len(all_runs_metric.keys())), lo, hi, alpha=0.3, color=pp_colors[i])
plt.legend(); # bbox_to_anchor=(1.2, 1.05));
plt.xticks(range(len(all_runs_metric.keys())), all_runs_metric.keys());
# plt.ylim(-0.1, 1.1);
# plt.hlines(y=0.0, xmin=0, xmax=len(all_runs_metric.keys()), linestyles='dashed');
# plt.title("%s of Performance Predictors" % name);
plt.ylabel('%s' % name.replace('auc_sigma_mae', r'$MAE_{CI}$').replace('within_ci_mae', r'$MAE_{CI_{0.05}}$'));
plt.tight_layout()
plt.savefig(os.path.join(out_fld, '%s%s.png' % (name, suffix)), bbox_inches='tight')
plt.close()
# disaggregated results
if disaggregate:
for run in range(n_runs):
series_1 = | pd.Series() | pandas.Series |
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import random
import matplotlib.pyplot as plt
import seaborn as sns
import Levenshtein
from tqdm import tqdm
from torch.utils import data
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from attention import Attention
from utils import LETTER_LIST, letter2index, index2letter
from search import greedy_search, beam_search
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")
def train(model, train_loader, criterion, optimizer, mode='train', TF_rate=0.95):
model.train()
running_entropy_loss = 0.0
running_perx_loss = 0.0
running_dist = 0.0
for i, (packed_x, packed_y, x_lengths, y_lengths) in enumerate(tqdm(train_loader)):
optimizer.zero_grad()
x, y, x_len, y_len = packed_x.to(device), packed_y.to(device), x_lengths.to(device), y_lengths.to(device)
y, _ = pad_packed_sequence(y, batch_first=True)
predictions, attentions = model(x, x_len, y, mode='train', TF_rate=TF_rate)
b, seq_max, _ = predictions.shape
# generate a mask to cross-out those padding's KL-divergence
mask = Variable(torch.zeros(b, seq_max), requires_grad=False).to(device)
for k in range(b):
mask[k][:y_len[k]] = 1
loss = criterion(predictions.view(-1, predictions.shape[2]), y.view(-1))
avg_masked_loss = torch.sum(loss * mask.view(-1)) / torch.sum(mask)
running_entropy_loss += float(avg_masked_loss.item())
running_perx_loss += float(torch.exp(avg_masked_loss).item())
avg_masked_loss.backward()
# visualize gradient distribution and attention plot for debugging purposes
# if you are using a jupyter notebook, you can uncomment the lines below to see the plots
# if (i == 0):
# plot_grad_flow(model.named_parameters(), i, i)
# plot_attention(attentions)
# clipping to avoid exploding gradients
clipping_value = 1.0
torch.nn.utils.clip_grad_norm_(model.parameters(), clipping_value)
optimizer.step()
# translation for Lev Distance, use greedy search during training
pred_strs = greedy_search(torch.argmax(predictions, dim=-1).detach().cpu().numpy())
ans_strs = greedy_search(y.detach().cpu().numpy())
running_dist += np.mean([Levenshtein.distance(pred, ans) for pred, ans in zip(pred_strs, ans_strs)])
# clear cuda cache for memory issue
del x, y, x_len, y_len
torch.cuda.empty_cache()
running_entropy_loss /= (i+1)
running_perx_loss /= (i+1)
running_dist /= (i+1)
print("Train Result: Cross Entropy Loss : {:.3f} and Perplex Loss : {:.3f} and Lev Dist : {:.3f}".format(running_entropy_loss, running_perx_loss, running_dist))
return running_entropy_loss, running_perx_loss, running_dist
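# A minimal, self-contained sketch (not part of the original script) of the padding-mask
# averaging used in train() and val() above. It assumes, as those functions do, that the
# criterion was created with reduction='none' so per-token losses can be zeroed out beyond
# each sequence's true length before averaging; shapes and the vocabulary size are made up.
def _masked_loss_example():
    criterion = nn.CrossEntropyLoss(reduction='none')
    logits = torch.randn(2, 4, 30)          # (batch, max_seq_len, vocab_size)
    targets = torch.randint(0, 30, (2, 4))  # padded label indices
    lengths = [4, 2]                        # true lengths of the two sequences
    mask = torch.zeros(2, 4)
    for k in range(2):
        mask[k][:lengths[k]] = 1            # 1 for real tokens, 0 for padding
    loss = criterion(logits.view(-1, logits.shape[2]), targets.view(-1))
    return torch.sum(loss * mask.view(-1)) / torch.sum(mask)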
def val(model, valid_loader, criterion, beam_width=8):
model.eval()
running_entropy_loss = 0.0
running_perx_loss = 0.0
running_dist = 0.0
with torch.no_grad():
for i, (packed_x, packed_y, x_lengths, y_lengths) in enumerate(tqdm(valid_loader)):
x, y, x_len, y_len = packed_x.to(device), packed_y.to(device), x_lengths.to(device), y_lengths.to(device)
y, _ = pad_packed_sequence(y, batch_first=True)
# one predictions is ending-controlled for loss computation
# another is free-generation util max-len reached
predictions, attentions = model(x, x_len, y, mode='eval')
predictions_loss, attentions = model(x, x_len, y, mode='train')
# generate a mask to cross-out those padding's KL-divergence
b, seq_max, _ = predictions_loss.shape
mask = Variable(torch.zeros(b, seq_max), requires_grad=False).to(device)
for k in range(b):
mask[k][:y_len[k]] = 1
loss = criterion(predictions_loss.view(-1, predictions_loss.shape[2]), y.view(-1))
avg_masked_loss = torch.sum(loss * mask.view(-1)) / torch.sum(mask)
running_entropy_loss += float(avg_masked_loss.item())
running_perx_loss += float(torch.exp(avg_masked_loss).item())
# translation for Lev Distance, using beam search in validation
beam_pred_strs = beam_search(F.softmax(predictions, dim=-1).detach().cpu().numpy(), beam_width)
ans_strs = greedy_search(y.detach().cpu().numpy())
running_dist += np.mean([Levenshtein.distance(pred, ans) for pred, ans in zip(beam_pred_strs, ans_strs)])
del x, y, x_len, y_len
torch.cuda.empty_cache()
running_entropy_loss /= (i+1)
running_perx_loss /= (i+1)
running_dist /= (i+1)
print("Valid: Cross Entropy Loss : {:.3f} and Perplex Loss : {:.3f} and Lev Dist(beam width={}) : {:.3f}".format(running_entropy_loss, running_perx_loss, beam_width, running_dist))
return running_entropy_loss, running_perx_loss, running_dist
def inference(model, test_loader, output_path="../output", beam_width=8):
"""
inference
"""
def test(model, test_loader, beam_width):
"""
Generate testing string transaltion for the test dataset
"""
model.eval()
str_predictions = []
with torch.no_grad():
for i, (packed_x, x_lengths) in enumerate(tqdm(test_loader)):
x, x_len = packed_x.to(device), x_lengths.to(device)
predictions, attentions = model(x, x_len, y=None, mode='eval')
# You can choose to use greedy search(more efficient) or beam search(more exploratory)
# pred_strs = greedy_search(torch.argmax(predictions, dim=-1).detach().cpu().numpy())
pred_strs = beam_search(F.softmax(predictions, dim=-1).detach().cpu().numpy(), beam_width)
str_predictions.extend(pred_strs)
del x, x_len
torch.cuda.empty_cache()
return str_predictions
def output(predictions, output_path):
"""
Output the inference result to the proper csv file with column header for submission
@param:
predictions [List] : inferred transcript strings for the test dataset
"""
df = | pd.DataFrame(predictions, columns=["label"]) | pandas.DataFrame |
import re
import json
import os
import string
import pickle
import datetime
import ipdb
import pandas as pd
from reuter_data import logging
NEWS_MONTH = ['07', '08', '09', '10']
NEWS_NUMBER = [14793, 11978, 11337, 9743]
def clean_sentence(s):
s = re.sub("\n", " ", s)
s = re.sub("[" + string.punctuation + "]", " ", s)
s = re.sub(" +", " ", s)
return s.strip()
def collect_webhose_news():
df = pd.DataFrame()
for i, month in enumerate(NEWS_MONTH):
for index in range(1, NEWS_NUMBER[i] + 1):
logging('{0:s}: {1:05d}/{2:05d}'.format(month, index, NEWS_NUMBER[i]))
news_path = "data/{0:s}/news_{1:07d}.json".format(month, index)
with open(news_path) as f:
datum_json = json.load(f)
datum = pd.Series(
data={
'text':datum_json['text'],
'published_time':datum_json['published'],
'country':datum_json['thread']['country'],
'title':datum_json['thread']['title'],
'site':datum_json['thread']['site']
}
)
df = df.append(datum, ignore_index=True)
df.to_csv('webhose_data.csv', index=False)
ipdb.set_trace()
def read_news_dataframe(news_per_day):
df = pd.DataFrame()
date_news_count = {}
for i, month in enumerate(NEWS_MONTH):
for index in range(NEWS_NUMBER[i]):
news_path = "data/{0:s}/news_{1:07d}.json".format(month, NEWS_NUMBER[i] - index)
with open(news_path) as f:
datum_json = json.load(f)
publish_time = pd.to_datetime(datum_json['published'])
date_str = publish_time.strftime('%Y-%m-%d')
start_time = datetime.datetime.strptime('{0:s} 0930'.format(date_str), '%Y-%m-%d %H%M')
end_time = datetime.datetime.strptime('{0:s} 1600'.format(date_str), '%Y-%m-%d %H%M')
if date_str not in date_news_count:
date_news_count[date_str] = 0
if date_news_count[date_str] > news_per_day:
continue
if publish_time <= start_time or publish_time >= end_time:
continue
if datum_json['thread']['country'] != 'US' or 'finance' not in str(datum_json):
continue
text = clean_sentence(datum_json['text'])
if len(text.split(' ')) < 100:
continue
date_news_count[date_str] += 1
datum = pd.Series(
data={
'text':text,
'date':date_str
}
)
df = df.append(datum, ignore_index=True)
#pickle.dump(date_news_count, open("data/date_news_count.p", "wb"))
return df
def read_SNP_dataframe():
snp = pd.read_csv("data/GSPC.csv")
snp['target'] = pd.Series('2015-06-30').append(snp['Date'][0:-1], ignore_index=True)
for offset in range(1, 6):
name = 'previous_price_{0:d}'.format(offset)
snp[name] = pd.Series([None for _ in range(offset)]).append(snp['Close'][0:-offset] - snp['Open'][0:-offset], ignore_index=True)
return snp
def find_price(ticker, timestamp):
date_str = timestamp.strftime('%Y%m%d')
path = 'data/SNP/{0:s}/price/price_{1:s}.json'.format(ticker, date_str)
if not os.path.exists(path):
return None
with open(path, 'rb') as f:
data = pickle.load(f)
start_time = datetime.datetime.strptime(date_str + ' 09:30', '%Y%m%d %H:%M')
offset = int((timestamp - start_time).total_seconds() // 60)
return data[offset]
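# For example (hypothetical timestamp): a 10:00 timestamp on the same trading day gives
# offset = (10:00 - 09:30) in minutes = 30, i.e. the 31st per-minute price entry in that pickle.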
def generate_reuter_price():
reuter = pd.read_csv('reuter_data.csv')
reuter['published_time'] = pd.to_datetime(reuter['published_time'])
reuter.sort_values('published_time', inplace=True)
twenty_min = datetime.timedelta(minutes=20)
df = | pd.DataFrame() | pandas.DataFrame |
import os
import sys
module_path = os.path.abspath(os.path.join('.'))
if module_path not in sys.path:
sys.path.append(module_path)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
hippocampusAreas = {
'CA1' : 1,
'CA2' : 2,
'CA3' : 3,
'DG' : 4,
'Outer' : 0
}
def loadFromFile(fileName = "") :
if fileName == "" :
return
unprocessedDataSet = pd.read_csv(fileName)
# convert to labels to numbers
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'CA1', ['Y']] = 1
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'CA2', ['Y']] = 2
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'CA3', ['Y']] = 3
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'DG', ['Y']] = 4
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'Outer', ['Y']] = 0
#remove bogus records
unprocessedDataSet = unprocessedDataSet.replace([np.inf, -np.inf], np.nan)
unprocessedDataSet = unprocessedDataSet.dropna();
nulls = np.where(pd.isnull(unprocessedDataSet))
#Main Dataset
hipAreaData = unprocessedDataSet.drop(unprocessedDataSet.index[nulls[0]])
#Get all Non hippocampal pixels
isOuterData = hipAreaData['Y']==0
outerData = hipAreaData[isOuterData]
#Get all Hippocampal pixels: CA1, CA2, CA3, DG
isCA1Data = hipAreaData['Y']==1
CA1Data = hipAreaData[isCA1Data]
isCA2Data = hipAreaData['Y']==2
CA2Data = hipAreaData[isCA2Data]
isCA3Data = hipAreaData['Y']==3
CA3Data = hipAreaData[isCA3Data]
isDgData = hipAreaData['Y']==4
dgData = hipAreaData[isDgData]
#stack all hippocampal pixels together
data = [CA1Data, CA2Data, CA3Data, dgData]
xDevDataset = | pd.concat(data) | pandas.concat |
from dataclasses import dataclass, field, asdict
from typing import Dict, List, Union, Iterable
import pandas as pd
from pandas import DataFrame
from PyQt5 import QtCore, QtGui, QtWidgets, sip
from PyQt5.QtCore import Qt
import traceback
from functools import wraps
from datetime import datetime
from pandasgui.utility import get_logger, unique_name, in_interactive_console
import os
import collections
logger = get_logger(__name__)
@dataclass
class Settings:
# Should GUI block code execution until closed
block: bool
# Are table cells editable
editable: bool = True
style: str = "Fusion"
def __init__(self, block=None):
# Default blocking behavior
if block is None:
if in_interactive_console():
# Don't block if in an interactive console (so you can view GUI and still continue running commands)
self.block = False
else:
# If in a script, we need to block or the script will continue and finish without allowing GUI interaction
self.block = True
@dataclass
class Filter:
expr: str
enabled: bool
failed: bool
@dataclass
class HistoryItem:
name: str
args: tuple
kwargs: dict
time: str
def track_history(func):
@wraps(func)
def wrapper(pgdf, *args, **kwargs):
history_item = HistoryItem(name=func.__name__,
args=args,
kwargs=kwargs,
time=datetime.now().strftime("%H:%M:%S"))
pgdf.history.append(history_item)
return func(pgdf, *args, **kwargs)
return wrapper
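# Illustration of the decorator above (hypothetical call): invoking pgdf.sort_column(2) on a
# PandasGuiDataFrame appends HistoryItem(name='sort_column', args=(2,), kwargs={},
# time='14:05:32') to pgdf.history before the wrapped method body runs.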
class PandasGuiDataFrame:
def __init__(self, df: DataFrame, name: str = 'Untitled'):
super().__init__()
df = df.copy()
self.dataframe = df
self.dataframe_original = df
self.name = name
self.history: List[HistoryItem] = []
# References to other object instances that may be assigned later
self.settings: Settings = Settings()
self.store: Union[Store, None] = None
self.dataframe_explorer: Union["DataFrameExplorer", None] = None
self.dataframe_viewer: Union["DataFrameViewer", None] = None
self.filter_viewer: Union["FilterViewer", None] = None
self.column_sorted: Union[int, None] = None
self.index_sorted: Union[int, None] = None
self.sort_is_ascending: Union[bool, None] = None
self.filters: List[Filter] = []
# Refresh PyQt models when the underlying pgdf is changed in any way that needs to be reflected in the GUI
def update(self):
models = []
if self.dataframe_viewer is not None:
models += [self.dataframe_viewer.dataView.model(),
self.dataframe_viewer.columnHeader.model(),
self.dataframe_viewer.indexHeader.model(),
self.dataframe_viewer.columnHeaderNames.model(),
self.dataframe_viewer.indexHeaderNames.model(),
]
if self.filter_viewer is not None:
models += [self.filter_viewer.list_model,
]
for model in models:
model.beginResetModel()
model.endResetModel()
for view in [self.dataframe_viewer.columnHeader,
self.dataframe_viewer.indexHeader]:
view.set_spans()
@track_history
def edit_data(self, row, col, value, skip_update=False):
# Not using iat here because it won't work with MultiIndex
self.dataframe_original.at[self.dataframe.index[row], self.dataframe.columns[col]] = value
if not skip_update:
self.apply_filters()
self.update()
@track_history
def paste_data(self, top_row, left_col, df_to_paste):
# Not using iat here because it won't work with MultiIndex
for i in range(df_to_paste.shape[0]):
for j in range(df_to_paste.shape[1]):
value = df_to_paste.iloc[i, j]
self.edit_data(top_row + i, left_col + j, value, skip_update=True)
self.apply_filters()
self.update()
@track_history
def sort_column(self, ix: int):
col_name = self.dataframe.columns[ix]
# Clicked an unsorted column
if ix != self.column_sorted:
self.dataframe = self.dataframe.sort_values(col_name, ascending=True, kind="mergesort")
self.column_sorted = ix
self.sort_is_ascending = True
# Clicked a sorted column
elif ix == self.column_sorted and self.sort_is_ascending:
self.dataframe = self.dataframe.sort_values(col_name, ascending=False, kind="mergesort")
self.column_sorted = ix
self.sort_is_ascending = False
# Clicked a reverse sorted column - reset to the original unsorted order
elif ix == self.column_sorted:
unsorted_index = self.dataframe_original[self.dataframe_original.index.isin(self.dataframe.index)].index
self.dataframe = self.dataframe.reindex(unsorted_index)
self.column_sorted = None
self.sort_is_ascending = None
self.index_sorted = None
self.update()
@track_history
def sort_index(self, ix: int):
# Clicked an unsorted index level
if ix != self.index_sorted:
self.dataframe = self.dataframe.sort_index(level=ix, ascending=True, kind="mergesort")
self.index_sorted = ix
self.sort_is_ascending = True
# Clicked a sorted index level
elif ix == self.index_sorted and self.sort_is_ascending:
self.dataframe = self.dataframe.sort_index(level=ix, ascending=False, kind="mergesort")
self.index_sorted = ix
self.sort_is_ascending = False
# Clicked a reverse sorted index level - reset to the original unsorted order
elif ix == self.index_sorted:
unsorted_index = self.dataframe_original[self.dataframe_original.index.isin(self.dataframe.index)].index
self.dataframe = self.dataframe.reindex(unsorted_index)
self.index_sorted = None
self.sort_is_ascending = None
self.column_sorted = None
self.update()
@track_history
def add_filter(self, expr: str, enabled=True):
filt = Filter(expr=expr, enabled=enabled, failed=False)
self.filters.append(filt)
self.apply_filters()
@track_history
def remove_filter(self, index: int):
self.filters.pop(index)
self.apply_filters()
@track_history
def edit_filter(self, index: int, expr: str):
filt = self.filters[index]
filt.expr = expr
filt.failed = False
self.apply_filters()
@track_history
def toggle_filter(self, index: int):
self.filters[index].enabled = not self.filters[index].enabled
self.apply_filters()
def apply_filters(self):
df = self.dataframe_original
for ix, filt in enumerate(self.filters):
if filt.enabled and not filt.failed:
try:
df = df.query(filt.expr)
except Exception as e:
self.filters[ix].failed = True
logger.exception(e)
self.dataframe = df
self.update()
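# For example (hypothetical column names): add_filter('age > 30 and country == "US"') keeps
# only the rows of dataframe_original satisfying that pandas query expression; a filter whose
# expression raises is marked failed and skipped on subsequent apply_filters() calls.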
@staticmethod
def cast(x: Union["PandasGuiDataFrame", pd.DataFrame, pd.Series, Iterable]):
if type(x) == PandasGuiDataFrame:
return x
if type(x) == pd.DataFrame:
return PandasGuiDataFrame(x)
elif type(x) == pd.Series:
return PandasGuiDataFrame(x.to_frame())
else:
try:
return PandasGuiDataFrame( | pd.DataFrame(x) | pandas.DataFrame |
## Dataset.py
__all__ = ["Proxy",
"Train",
"Predict"]
import pandas as pd
from .utils import *
from .Descriptors import *
from tqdm import tqdm
import logging
class Proxy:
"""
It communicates with the different objects of BBPRED
"""
def __init__(self):
self.Full_Cached_DataFrame = None
self.Cached_DataFrame_HASH = None
def _Data_splitter(self,csv_file):
if file_validator(csv_file):
self.Cached_DataFrame_HASH = File_hash_calculator(csv_file)
df = pd.read_csv(csv_file)
SMILES = df["SMILES"]
Target = df["Target"]
return [SMILES,Target]
def _Descriptor_calc(self,smiles_target,args=None):
print(smiles_target)
SMILES,Target = smiles_validator(*smiles_target)
if args == None:
data = []
for i in tqdm(SMILES):
dis = Descriptor(i)
data.append(Data_dict(dis.get_all()))
return Data_frame(data,Target)
else:
single_data = []
for i in tqdm(SMILES):
single_data.append(Data_dict(Descriptor(i).get(args)))
return Data_frame(single_data,Target)
def get_raw(self,csv_file,):
SMILES , Target = self._Data_splitter(csv_file)
return pd.DataFrame({"SMILES":SMILES,
"Target":Target},)
def get_raw_descriptor(self,csv_file,*args,as_csv=False,path=""):
if File_hash_calculator(csv_file) == self.Cached_DataFrame_HASH:
return self.Full_Cached_DataFrame
else:
self.Cached_DataFrame_HASH = None
self.Full_Cached_DataFrame = None
if len(args) == 0:
if self.Full_Cached_DataFrame == None:
Smiles_data = self._Data_splitter(csv_file)
data = self._Descriptor_calc(Smiles_data)
self.Full_Cached_DataFrame = data
if as_csv:
return | pd.to_csv(data,path) | pandas.to_csv |
""" RNA-Seq analysis pipeline following the methods used by ICGC.
This script works specifically for the mice data on the JLU server.
Some of the functions could be rewritten for general use.
Function:
1. Walk through DATA_DIR and generate a design matrix.
2. Concatenate samples with multiple fastq files (split lanes).
3. Run fastp on all fq.gz files, and store the trimmed files under RES_DIR/data.
Reports of fastp are stored in RES_DIR/fastp.
4. Align each sample using a two-pass method with STAR
Output:
Under `RES_DIR/data` and `RES_DIR/fastp`, we have the trimmed FASTQ files and their
corresponding fastp reports. The MultiQC report is under `RES_DIR/fastp/multiqc`.
Under `RES_DIR/bam`, each sample should have its own sub-directory containing
the following files with the `sample_group` as a prefix:
- Aligned.out.bam: all genomic alignments including chimeric and unaligned reads
- Aligned.toTranscriptome.out.bam: aligned reads with transcript coordinates rather than genomic coordinates
- Chimeric.out.junction: reads that were mapped to different chromosomes or strands (fusion alignments)
- SJ.out.tab: high confidence collapsed splice junctions
- Log(.final|.progress).out
Under `RES_DIR/counts`, the counts produced by STAR is moved here.
See https://www.biostars.org/p/218995/. In our case cols 1 and 2 should be kept.
Under `RES_DIR/tpm`, the TPM values produced by Salmon are stored.
Ensembl transcript IDs are used because the reads were aligned to the reference transcriptome.
Software and data:
- fastp v0.20.0: https://github.com/OpenGene/fastp
- FastQC v0.11.9: http://www.bioinformatics.babraham.ac.uk/projects/fastqc/fastqc_v0.11.9.zip
- multiQC v1.8: https://multiqc.info
- STAR v2.7.2b: https://github.com/alexdobin/STAR
- Salmon v1.1.0: https://github.com/COMBINE-lab/salmon/releases/download/v1.1.0/salmon-1.1.0_linux_x86_64.tar.gz
- Reference genome: ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_mouse/release_M24/GRCm38.primary_assembly.genome.fa.gz
- Reference transcriptome: ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_mouse/release_M24/gencode.vM24.transcripts.fa.gz
- Gene annotation: ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_mouse/release_M24/gencode.vM24.annotation.gtf.gz
References:
https://docs.gdc.cancer.gov/Data/Bioinformatics_Pipelines/Expression_mRNA_Pipeline/
https://github.com/akahles/icgc_rnaseq_align/blob/master/star_align.py
https://salmon.readthedocs.io/en/latest/salmon.html
https://combine-lab.github.io/alevin-tutorial/2019/selective-alignment/
Choice of software: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4728800/
"""
import glob
import gzip
import logging
import os
import re
import shutil
import subprocess
from typing import List
import pandas as pd
# Filepath variables
WORK_DIR = os.path.expanduser("~/rna_seq")
DATA_DIR = os.path.join(WORK_DIR, "mm_liver_data")
RES_DIR = os.path.join(WORK_DIR, "results")
REFERENCE_GENOME_PATH = os.path.join(WORK_DIR, "genome", "GRCm38.p6.genome.fa")
REFERENCE_TRANSCRIPTOME_PATH = os.path.join(
WORK_DIR, "genome", "gencode.vM24.transcripts.fa"
)
GENCODE_PATH = os.path.join(WORK_DIR, "genome", "gencode.vM24.annotation.gtf")
STAR_INDEX_DIR = os.path.join(WORK_DIR, "star_index")
FASTP_PATH = os.path.expanduser("~/pkg/bin/fastp")
FASTQC_PATH = os.path.expanduser("~/pkg/bin/fastqc")
MULTIQC_PATH = os.path.expanduser("~/.local/bin/multiqc")
STAR_PATH = os.path.expanduser("~/pkg/bin/STAR")
SALMON_PATH = os.path.expanduser("~/pkg/bin/salmon/bin/salmon")
for d in [
f"{RES_DIR}/data",
f"{RES_DIR}/fastp/multiqc",
f"{RES_DIR}/fastqc",
f"{RES_DIR}/bam",
f"{RES_DIR}/counts",
f"{RES_DIR}/tpm",
STAR_INDEX_DIR,
]:
os.makedirs(d, exist_ok=True)
# Log settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(f"{__file__.rstrip('.py')}.log"))
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s [%(levelname)s]{%(lineno)d:%(funcName)12s()} %(message)s",
"%Y-%m-%d %H:%M:%S",
)
handler.setFormatter(formatter)
logger.addHandler(handler)
def ListFastqFiles(group: str = "original") -> List[List[str]]:
""" List fastq files to be analyzed in the current working directory.
Folders could contain multiple files for the same sample,
which should be joined as they are from multiple lanes.
Returns:
A list of lists of absolute paths to the fq.gz files. Each sublist
contains files in the same directory.
"""
# Find the lowest-level directories containing the files
all_files = glob.glob(f"{DATA_DIR}/{group}/**/FCH*.fq.gz", recursive=True)
dir_names = set((os.path.dirname(x) for x in all_files))
res = []
for dir_name in dir_names:
# Assuming we don't have any missing files
uniq_samples = list(
set(
(
re.sub(r"^(.*)_[12]\.fq\.gz$", r"\1", x)
for x in glob.glob(f"{dir_name}/FCH*.fq.gz")
)
)
)
logger.debug(f"Found {len(uniq_samples)} for {dir_name}")
res.append(uniq_samples)
return res
def Samples2DesignMatrix(samples: List[List[str]]) -> pd.DataFrame:
""" Convert the samples list to a design matrix.
Args:
samples (List[List[str]]): return from function `ListFastqFiles`.
Returns:
pd.DataFrame: a design matrix with the following columns:
- timepoint: 1 - 9
- sample_type: Control / Model / Genpin
- sample_group: e.g. original/T1_C/11_90bp
- filename: comma-separated filenames (_[1|2] removed)
"""
_samples = [x for subl in samples for x in subl]
_samples = [re.sub(f"{DATA_DIR}/", "", x) for x in _samples]
filenames = [os.path.basename(x) for x in _samples]
sample_groups = [os.path.dirname(x) for x in _samples]
possible_sample_types = {"C": "Control", "M": "Model", "G": "Genpin"}
timepoints = []
sample_type = []
for sg in sample_groups:
if sg.startswith("original"):
tmp = re.search("/T(\d+)_([CMG])", sg)
timepoints.append(tmp.group(1))
sample_type.append(possible_sample_types[tmp.group(2)])
else: # A "new" sample
tmp = re.search("/Time_(\d+)_.*/(Control|Model)", sg)
timepoints.append(tmp.group(1))
sample_type.append(tmp.group(2))
df = pd.DataFrame(
{
"timepoint": timepoints,
"sample_type": sample_type, # Control/Model
"sample_group": sample_groups, #
"filename": filenames, # FCH*_[1|2].fq.gz
}
)
df = df.sort_values(by=["timepoint", "sample_type", "sample_group"])
df = (
df.groupby(["timepoint", "sample_type", "sample_group"])["filename"]
.agg(",".join)
.reset_index()
)
return df
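# Minimal illustration (hypothetical file names, not real data) of the transformation above:
# two lane files for one control sample at timepoint 1, e.g. the sample prefixes
# DATA_DIR/original/T1_C/11_90bp/FCHAAA_L1 and DATA_DIR/original/T1_C/11_90bp/FCHAAA_L2,
# collapse into a single row: timepoint='1', sample_type='Control',
# sample_group='original/T1_C/11_90bp', filename='FCHAAA_L1,FCHAAA_L2'.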
def ConcatSamples(samples: List[str]):
""" Concatenate samples in the same group to two fq.gz files.
Files are cat together because they are the same sample split to multiple
lanes. The concatenate files are for the forward and reverse reads.
Args:
samples (List[str]): absolute file path prefixes with the
trailing _[1|2].fq.gz stripped
Returns:
the `sample_group` of the concatenated files.
"""
f_out = f"{os.path.dirname(samples[0])}/"
sample_group = re.sub(DATA_DIR, "", f_out).strip("/")
if not os.path.exists(f"{f_out}merged_1.fq.gz"):
f_of_sample = " ".join([f"{x}_1.fq.gz" for x in samples])
cmd1 = f"cat {f_of_sample} > {f_out}merged_1.fq.gz"
subprocess.check_call(cmd1, shell=True)
logger.info(f"Concatenated fastq files for {sample_group}_1")
if not os.path.exists(f"{f_out}merged_2.fq.gz"):
f_of_sample = " ".join([f"{x}_2.fq.gz" for x in samples])
cmd2 = f"cat {f_of_sample} > {f_out}merged_2.fq.gz"
subprocess.check_call(cmd2, shell=True)
logger.info(f"Concatenated fastq files for {sample_group}_2")
return sample_group
if __name__ == "__main__":
logger.info("\x1b[31;1m" + "/*** GDC RNA-Seq pipeline started! ***/" + "\x1b[0m")
###################################################################
# Get design matrix #
###################################################################
logger.info("\x1b[33;21m" + "Step 1: get design matrix" + "\x1b[0m")
if os.path.exists(f"{DATA_DIR}/design_matrix.csv"):
design_mat = pd.read_csv(f"{DATA_DIR}/design_matrix.csv")
logger.info("Read design matrix from file")
else:
# Get sample paths for original and new files
original_samples = ListFastqFiles(group="original")
new_samples = ListFastqFiles(group="new")
all_samples = original_samples + new_samples
# Make design matrix of raw data files
design_mat = Samples2DesignMatrix(all_samples)
design_mat.to_csv(f"{DATA_DIR}/design_matrix.csv", index=False)
logger.info("Created design matrix")
###################################################################
# Concatenate multi-lane samples #
###################################################################
logger.info("\x1b[33;21m" + "Step 2: concatenate multi-lane samples" + "\x1b[0m")
sm_multi_lanes = design_mat[design_mat["filename"].str.contains(",")]
filenames = sm_multi_lanes.apply(
lambda x: [
f"{DATA_DIR}/{x['sample_group']}/{ele}" for ele in x["filename"].split(",")
],
axis=1,
).tolist()
for f in filenames:
sg = ConcatSamples(f)
design_mat.loc[design_mat["sample_group"] == sg, "filename"] = "merged"
assert not any(design_mat.filename.str.contains(","))
###################################################################
# Run fastp on all fq files #
###################################################################
logger.info("\x1b[33;21m" + "Step 3: QC and preprocess with fastp" + "\x1b[0m")
filenames = (
DATA_DIR + "/" + design_mat["sample_group"] + "/" + design_mat["filename"]
)
filenames = filenames.tolist()
sample_groups = [x.replace("/", "_") for x in design_mat["sample_group"]]
for i, (f, sg) in enumerate(zip(filenames, sample_groups)):
if os.path.exists(f"{RES_DIR}/fastp/{sg}_fastp.json"):
continue
logger.info(f"Running fastp on sample {sg}...")
subprocess.check_call(
f"""{FASTP_PATH} -V -i {f}_1.fq.gz -I {f}_2.fq.gz \
-o {RES_DIR}/data/{sg}_1.fq.gz \
-O {RES_DIR}/data/{sg}_2.fq.gz \
--html {RES_DIR}/fastp/{sg}_fastp.html \
--json {RES_DIR}/fastp/{sg}_fastp.json \
-w 4""",
shell=True,
)
logger.info(f"Generated fastp report for sample {sg}")
subprocess.check_call(
f"{MULTIQC_PATH} {RES_DIR}/fastp/ -m fastp -o {RES_DIR}/fastp/multiqc/",
shell=True,
)
if not os.path.exists(f"{RES_DIR}/fastqc/multiqc_report.html"):
subprocess.check_call(
f"{FASTQC_PATH} {RES_DIR}/data/* --noextract -o {RES_DIR}/fastqc/ -t 4",
shell=True,
)
subprocess.check_call(
f"{MULTIQC_PATH} {RES_DIR}/fastp/ -m fastqc -o {RES_DIR}/fastqc/",
shell=True,
)
###################################################################
# Align sequences and call counts #
###################################################################
logger.info("\x1b[33;21m" + "Step 4: STAR alignment" + "\x1b[0m")
# Build the STAR index if it's not already built
if not os.path.exists(f"{STAR_INDEX_DIR}/Genome"):
logger.info("STAR index not found. Building now...")
subprocess.check_call(
f"""STAR \
--runMode genomeGenerate \
--genomeDir {STAR_INDEX_DIR} \
--genomeFastaFiles {REFERENCE_GENOME_PATH} \
--sjdbOverhang 100 \
--sjdbGTFfile {GENCODE_PATH} \
--runThreadN 8 \
--outFileNamePrefix {WORK_DIR}/logs/star_index""",
shell=True,
)
logger.info(f"STAR index built to {STAR_INDEX_DIR}")
# Run STAR for each sample if output files are not found
sg_sms = design_mat["sample_group"].apply(os.path.basename)
for i, (sg, sm) in enumerate(zip(sample_groups, sg_sms)):
if os.path.exists(f"{RES_DIR}/counts/{sg}.tsv") and os.path.exists(
f"{RES_DIR}/bam/{sg}"
):
continue
os.makedirs(f"{RES_DIR}/bam/{sg}", exist_ok=True)
logger.info(f"Aligning sample {sg}")
subprocess.check_call(
f"""{STAR_PATH} \
--readFilesIn {RES_DIR}/data/{sg}_1.fq.gz {RES_DIR}/data/{sg}_2.fq.gz \
--outSAMattrRGline ID:{sg} SM:{sm} \
--alignIntronMax 1000000 \
--alignIntronMin 20 \
--alignMatesGapMax 1000000 \
--alignSJDBoverhangMin 1 \
--alignSJoverhangMin 8 \
--alignSoftClipAtReferenceEnds Yes \
--chimJunctionOverhangMin 15 \
--chimMainSegmentMultNmax 1 \
--chimOutType Junctions SeparateSAMold WithinBAM SoftClip \
--chimSegmentMin 15 \
--genomeDir {STAR_INDEX_DIR} \
--genomeLoad NoSharedMemory \
--limitSjdbInsertNsj 1200000 \
--outFileNamePrefix {RES_DIR}/bam/{sg}/{sg} \
--outFilterIntronMotifs None \
--outFilterMatchNminOverLread 0.33 \
--outFilterMismatchNmax 999 \
--outFilterMismatchNoverLmax 0.1 \
--outFilterMultimapNmax 20 \
--outFilterScoreMinOverLread 0.33 \
--outFilterType BySJout \
--outSAMattributes NH HI AS nM NM ch \
--outSAMstrandField intronMotif \
--outSAMtype BAM Unsorted \
--outSAMunmapped Within \
--quantMode TranscriptomeSAM GeneCounts \
--readFilesCommand zcat \
--runThreadN 8 \
--twopassMode Basic""",
shell=True,
)
shutil.move(
f"{RES_DIR}/bam/{sg}/{sg}ReadsPerGene.out.tab", f"{RES_DIR}/counts/{sg}.tsv"
)
logger.info(f"Counts for sample {sg} generated")
###################################################################
# Get TPM using Salmon #
###################################################################
logger.info("\x1b[33;21m" + "Step 5: Get TPM values using Salmon" + "\x1b[0m")
for sg in sample_groups:
if os.path.exists(f"{RES_DIR}/tpm/{sg}"):
continue
logger.info(f"Calling TPM values for sample {sg}")
subprocess.check_call(
f"""{SALMON_PATH} quant \
-t {REFERENCE_TRANSCRIPTOME_PATH} \
-l A \
-a {RES_DIR}/bam/{sg}/{sg}Aligned.toTranscriptome.out.bam \
-o {RES_DIR}/tpm/{sg} \
--gencode""",
shell=True,
)
###################################################################
# Combine counts and TPM tables #
###################################################################
logger.info("\x1b[33;21m" + "Step 6: Combine counts and TPM tables" + "\x1b[0m")
counts_table = []
tpm_table = []
for sg in sample_groups:
# Combine counts files into one table
df = pd.read_table(f"{RES_DIR}/counts/{sg}.tsv", header=None).iloc[:, 0:2]
df.columns = ["Ensembl", "count"]
df["sample_group"] = sg
counts_table.append(df)
# Combine TPM files, also copy the quant.sf files to a separate folder
df = pd.read_table(f"{RES_DIR}/tpm/{sg}/quant.sf")[["Name", "TPM"]]
df.columns = ["Ensembl", "TPM"]
df["sample_group"] = sg
tpm_table.append(df)
counts_table = | pd.concat(counts_table, axis=0, ignore_index=True) | pandas.concat |
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
from numpy.testing import assert_almost_equal
from supervised.preprocessing.loo_encoder import LooEncoder
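# Background for the assertions below: a leave-one-out encoder replaces each
# categorical value with the mean of the target over the *other* rows sharing
# that value, so the fitted encoder keeps a global target mean (enc._mean,
# here mean([1, 2, 0]) == 1.0) and a per-column mapping only for the columns
# passed via `cols` (here "col1").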
class LabelEncoderTest(unittest.TestCase):
def test_fit(self):
# training data
d = {"col1": ["a", "a", "c"], "col2": ["w", "e", "d"], "y": [1, 2, 0]}
df = pd.DataFrame(data=d)
le = LooEncoder(cols=["col1"])
le.fit(df[["col1", "col2"]], df["y"])
self.assertTrue(le.enc is not None)
self.assertTrue(le.enc._dim == 2)
assert_almost_equal(le.enc._mean, 1.0)
self.assertTrue("col1" in le.enc.mapping)
self.assertTrue("col2" not in le.enc.mapping)
def test_transform(self):
# training data
d = {"col1": ["a", "a", "c"]}
y = [1, 1, 0]
df = | pd.DataFrame(data=d) | pandas.DataFrame |
import mne
import matplotlib.pyplot as plt
import os
import numpy as np
import scipy.signal as sp
import pandas as pd
import seaborn as sns
def load_file(file_name, return_channels=False):
os.chdir(r"") # EDF data file
data = mne.io.read_raw_edf(file_name, preload=True)
channels = data.ch_names
print(channels)
dataraw = data.get_data()
events = np.genfromtxt(file_name[:-4] + ".ann", dtype=int, delimiter=",", skip_header=1)
if return_channels:
return dataraw, events, channels
else:
return dataraw, events
channels = load_file("data.edf", channels=True)[2]
def FFT(data, window=500, freqs=False):
spectre = abs(np.fft.fft(data).real) ** 2
freq = np.fft.fftfreq(window, d=.004)
if freqs == True:
return freq, spectre
else:
return spectre
data, events = load_file("data.edf")
print(data.shape)
def Epoched_data(data, window):
e_data = data[:, events[0, 0]:events[0, 0] + window]
for i in events[1:, 0]:
e_data = np.append(e_data, data[:, i:i + window], axis=1)
return e_data[:-4,:].reshape(-1, 19, 500)
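# Epoched_data cuts one `window`-sample segment starting at every annotated
# event, drops the last four channels and reshapes the concatenated array to
# shape (n_epochs, 19, 500); with the 0.004 s sampling period used in FFT()
# this corresponds to 2-second epochs.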
print(Epoched_data(load_file("data.edf")[0], 500))
freqs = FFT(Epoched_data(load_file("data.edf")[0], 500), freqs=True)[0][:250]
freqs1 = np.array(list(freqs) * 118).reshape(-1, 1)
ev = list(events[:, 2]) * 250
ev = np.array(ev).reshape(250, -1)
ev = ev.T.reshape(-1, 1)
result = np.append(ev, freqs1, axis=1)
def Feat_Spectre():
ex_t = np.zeros((1, 19))
e_data = Epoched_data(load_file("data.edf")[0], 500)
for j in range(e_data.shape[0]):
temp = np.zeros((1, 250))
for i in range(e_data.shape[1]):
temp = np.append(temp, [FFT(e_data[j, i, :])[:250]], axis=0)
temp = np.rot90(np.flip(temp[1:20,:], axis = 1), k = 1)
ex_t = np.append(ex_t, temp, axis=0)
return ex_t[1:,:]
spectre = Feat_Spectre()
db = | pd.DataFrame(spectre) | pandas.DataFrame |
from __future__ import print_function
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import colors
from collections import defaultdict
df = | pd.read_csv('stats.txt', index_col=0, dtype={'game': 'str'}) | pandas.read_csv |
#!/usr/bin/env python
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score, roc_curve
# import argparse, os
#
# parser = argparse.ArgumentParser()
# parser.add_argument("path_to_data")
# args = parser.parse_args()
# testfile = args.path_to_data
df = load_breast_cancer()
data, target, target_names, feature_names = df.data, df.target, df.target_names, df.feature_names
# plot heatmap
df = | pd.DataFrame(data, columns=feature_names) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 21:53:32 2017
@author: gason
"""
import pandas as pd
import numpy as np
import re
import time
import os
from collections import Iterable
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
from pandas.api.types import is_number
from pandas.api.types import is_datetime64_any_dtype
from pandas.api.types import is_categorical_dtype
from scipy import stats
from sklearn import metrics
from . import report as _rpt
from . import config
from .report import genwordcloud
from .utils.metrics import entropyc
from .utils import iqr
#from sklearn.neighbors import KernelDensity
import matplotlib.pyplot as plt
import seaborn as sns
_thisdir = os.path.split(__file__)[0]
# default chinese font
from matplotlib.font_manager import FontProperties
font_path=config.font_path
if font_path:
myfont=FontProperties(fname=font_path)
sns.set(font=myfont.get_name())
__all__=['type_of_var',
'describe',
'plot',
'features_analysis',
'distributions',
'AnalysisReport',
'ClassifierReport']
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From http://stats.stackexchange.com/questions/798/
a = np.asarray(a)
assert len(a.shape)>0
assert len(a)>0
h = 2 * iqr(a) / (len(a) ** (1 / 3))
# fall back to sqrt(len(a)) bins if iqr is 0
if h == 0:
return int(np.sqrt(a.size))
else:
return int(np.ceil((a.max() - a.min()) / h))
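# The rule above uses bin width h = 2 * IQR(a) / len(a)**(1/3) and returns
# ceil((max - min) / h) bins, e.g. roughly 10 bins for 1000 samples drawn
# uniformly from [0, 50).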
def distributions(a,hist=True,bins=None,norm_hist=True,kde=False,grid=None,gridsize=100,clip=None):
'''Distribution information of an array.
If hist=True, return the histogram as (counts, bins).
If kde=True, return the kernel density estimate as (grid, y).
example
-------
a=np.random.randint(1,50,size=(1000,1))
'''
a = np.asarray(a).squeeze()
if hist:
if bins is None:
bins = min(_freedman_diaconis_bins(a), 50)
counts,bins=np.histogram(a,bins=bins)
if norm_hist:
counts=counts/counts.sum()
if kde:
bw='scott'
cut=3
if clip is None:
clip = (-np.inf, np.inf)
try:
kdemodel = stats.gaussian_kde(a, bw_method=bw)
except TypeError:
kdemodel = stats.gaussian_kde(a)
bw = "scotts" if bw == "scott" else bw
bw = getattr(kdemodel, "%s_factor" % bw)() * np.std(a)
if grid is None:
support_min = max(a.min() - bw * cut, clip[0])
support_max = min(a.max() + bw * cut, clip[1])
grid=np.linspace(support_min, support_max, gridsize)
y = kdemodel(grid)
if hist and not(kde):
return counts,bins
elif not(hist) and kde:
return grid,y
elif hist and kde:
return ((counts,bins),(grid,y))
else:
return None
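# Example (illustrative):
#   a = np.random.randint(1, 50, size=(1000, 1))
#   counts, bins = distributions(a, hist=True, kde=False)
#   grid, y = distributions(a, hist=False, kde=True)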
def dtype_detection(data,category_detection=True,StructureText_detection=True,\
datetime_to_category=True,criterion='sqrt',min_mean_counts=5,fix=False):
'''Detect the data type of a single variable.
The variable is classified into one of the following kinds:
1. number: numeric
2. category: categorical (factor)
3. datetime: date/time
4. text: free text
5. text_st: structured text, e.g. IDs
6. group_number: grouped continuous values
parameter
---------
data: pd.Series, one-dimensional only
# if data is passed in, the function will modify the dtype of the original data
category_detection: bool, use nunique to decide whether the variable is categorical
StructureText_detection: bool, detect structured text, e.g. every value in the column contains a separator such as "-"
datetime_to_category: whether to convert a datetime series into a categorical variable when its nunique is small
criterion: string or int, optional (default="sqrt", i.e. the square root of the sample size)
supported values: 'sqrt' (square root of the sample size), int (absolute count), float in (0, 1) (fraction of the sample size)
when detecting categorical variables, a feature is treated as categorical if its nunique is smaller than criterion
min_mean_counts: default 5; a numeric variable is only treated as categorical if the mean count per category is greater than min_mean_counts
fix: bool, whether to return the data with corrected types
return:
result: dict{
'name': column name,
'vtype': variable type,
'ordered': whether it is an ordered factor,
'categories': all the categories}
'''
assert len(data.shape)==1
data=data.copy()
data=pd.Series(data)
dtype,name,n_sample=data.dtype,data.name,data.count()
if criterion=='sqrt':
max_nuniques=np.sqrt(n_sample)
elif isinstance(criterion,int):
max_nuniques=criterion
elif isinstance(criterion,float) and (0<criterion<1):
max_nuniques=criterion
else:
max_nuniques=np.sqrt(n_sample)
ordered=False
categories=[]
if is_numeric_dtype(dtype):
vtype='number'
ordered=False
categories=[]
# Correct mis-inferred dtypes, e.g. turn 1.0, 2.0, 3.0 into 1, 2, 3
if data.dropna().astype(np.int64).sum()==data.dropna().sum():
data[data.notnull()]=data[data.notnull()].astype(np.int64)
if category_detection:
nunique=len(data.dropna().unique())
mean_counts=data.value_counts().median()
if nunique<max_nuniques and mean_counts>=min_mean_counts:
data=data.astype('category')
ordered=data.cat.ordered
vtype='category'
categories=list(data.dropna().cat.categories)
result={'name':name,'vtype':vtype,'ordered':ordered,'categories':categories}
elif is_string_dtype(dtype):
# Handle datetime-like strings
tmp=data.map(lambda x: np.nan if '%s'%x == 'nan' else len('%s'%x))
tmp=tmp.dropna().astype(np.int64)
if not(any(data.dropna().map(is_number))) and 7<tmp.max()<20 and tmp.std()<0.1:
try:
data=pd.to_datetime(data)
except :
pass
# Handle possible categorical types
# if the column was parsed as datetime, datetime_to_category decides whether it becomes categorical
if datetime_to_category:
if len(data.dropna().unique())<np.sqrt(n_sample):
data=data.astype('category')
else:
nunique=len(data.dropna().unique())
#print(data.dtype)
if not(is_categorical_dtype(data.dtype)) and not(np.issubdtype(data.dtype,np.datetime64)) and nunique<max_nuniques:
data=data.astype('category')
# For non-categorical string columns, convert percentages to floats, e.g. 21.12% --> 0.2112
if is_string_dtype(data.dtype) and not(is_categorical_dtype(data.dtype)) and all(data.str.contains('%')):
data=data.str.strip('%').astype(np.float64)/100
if | is_categorical_dtype(data.dtype) | pandas.api.types.is_categorical_dtype |
import pandas as pa
import json
import datetime as dt
dfc = pa.read_json('./data/clients.json')
dft = pa.read_json('./data/tarifs.json')
dfv = | pa.read_json('./data/vehicules.json') | pandas.read_json |
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from src.shared.config import loadConfig
def imputeByMean(data, nonNumeric):
# for median imputation replace 'mean' with 'median'
imputedMean = SimpleImputer(strategy='mean')
dataNumeric = data.drop(nonNumeric, axis=1)
imputedMean.fit(dataNumeric)
imputedData = imputedMean.transform(dataNumeric)
d1 = | pd.DataFrame(imputedData) | pandas.DataFrame |
# Perform a tolerance sweep by ESM-SSP with well-characterized errors
# for an audience seeking to emulate from the full CMIP6 archive and for the
# scenarioMIP approach, for all ESMs
# For each ESM, loop over various tolerances and generate Ndraws = 500
# GSAT trajectories. Archives are built with and without each target to characterize
# error (reproducible mode is off, so draws differ). The ScenarioMIP approach is run as well.
# Compare to the target ensemble via 4 metrics (E1, E2 on both Tgavs and
# jumps) and record errors for each draw and tolerance.
## TODO: comment out saving the GSATs
## TODO functionalize at least some part of the analysis or at least use a for-loop
## over the different SSP targets so that the code isn't so long and repetitive
# making a table of avail runs X planned archives and for looping over that would
# trim things down (see approach for max tol runs). And rewrite the tolerance
# iteration to be a while loop, comparing current to prev instead of calculating
# and saving it all? Update writes and reads to be subdir so things are tidier
# would be better to functionalize this script with ESM, tol and Ndraws as arguments
# and then have the .sh just call the function and dispatch to diff nodes for each run I guess.
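# A minimal sketch of the while-loop refactor suggested above (helper names and
# the stopping rule are hypothetical, not part of the current script):
#   tol, prev_err = tol_start, np.inf
#   while tol <= tol_max:
#       err = evaluate_draws(esm, ssp, tol, Ndraws)  # E1/E2 on Tgavs and jumps
#       if err >= prev_err:
#           break  # stop once the error no longer improves
#       prev_err, tol = err, tol + tol_step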
# #############################################################################
# General setup
# #############################################################################
# Import packages
import pandas as pd
import numpy as np
import stitches as stitches
import pkg_resources
import os
from pathlib import Path
| pd.set_option('display.max_columns', None) | pandas.set_option |
import os
import sys
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from ucimlr.helpers import (download_file, download_unzip, one_hot_encode_df_, xy_split,
normalize_df_, split_normalize_sequence, split_df, get_split, split_df_on_column)
from ucimlr.dataset import Dataset
from ucimlr.constants import TRAIN
from ucimlr.constants import REGRESSION
def all_datasets():
"""
Returns a list of all RegressionDataset classes.
"""
return [cls for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and issubclass(cls, RegressionDataset)
and cls != RegressionDataset]
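# Example (illustrative) use of all_datasets() for the fully implemented
# loaders, assuming a local download root named 'datasets':
#   for DatasetCls in all_datasets():
#       ds = DatasetCls('datasets', split=TRAIN)
#       print(DatasetCls.__name__, ds.x.shape, ds.y.shape, ds.num_targets)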
class RegressionDataset(Dataset):
type_ = REGRESSION # Is this necessary?
@property
def num_targets(self):
return self.y.shape[1]
class Abalone(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Abalone).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None)
y_columns = df.columns[-1:]
one_hot_encode_df_(df)
df_test, df_train, df_valid = split_df(df, [0.2, 0.8 - 0.8 * validation_size, 0.8 * validation_size])
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class AirFoil(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'airfoil_self_noise.dat'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\t', names=["Frequency(Hz)", "Angle of attacks(Deg)", "Chord length(m)", "Free-stream velocity(m/s)", "Suction side displacement thickness(m)", "Scaled sound pressure level(Db)"])
y_columns = ['Scaled sound pressure level(Db)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class AirQuality(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'AirQualityUCI.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', parse_dates=[0, 1])
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.Date = (df.Date - df.Date.min()).astype('timedelta64[D]') # Days as int
df.Time = df.Time.apply(lambda x: int(x.split('.')[0])) # Hours as int
df['C6H6(GT)'] = df['C6H6(GT)'].apply(lambda x: float(x.replace(',', '.'))) # Target as float
# Some floats are given with ',' instead of '.'
df = df.applymap(lambda x: float(x.replace(',', '.')) if type(x) is str else x) # Target as float
df = df[df['C6H6(GT)'] != -200] # Drop all rows with missing target values
df.loc[df['CO(GT)'] == -200, 'CO(GT)'] = -10 # -200 means missing value, shifting this to be closer to
# the other values for this column
y_columns = ['C6H6(GT)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Appliances_energy_prediction(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'energydata_complete.csv'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, parse_dates=[0, 1])
df.date = (df.date - df.date.min()).astype('timedelta64[D]')
y_columns = ['Appliances']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class AutoMPG(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Auto+MPG).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'auto-mpg.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', names =["mpg", "cylinders", "displacements", "horsepower", "weight", "acceleration", "model year", "origin", "car name"])
y_columns = ['mpg']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class Automobile(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'imports-85.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names = ["symboling", "normalized-losses", "make", "fuel-type", " aspiration", "num-of-doors", "body-style", "drive-wheels", "engine-location", "wheel-base", " length", "width", " height", "curb-weight", "engine-type", "num-of-cylinders", "engine-size", " fuel-system", " bore", "stroke", " compression-ratio", "horsepower", "peak-rpm", "city-mpg", "highway-mpg", "price"])
y_columns = ['price']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class BeijingAirQuality(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if 'PRSA_Data' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class BeijingPM(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'PRSA_data_2010.1.1-2014.12.31.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
y_columns=['pm2.5']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class BiasCorrection(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bias+correction+of+numerical+prediction+model+temperature+forecast).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Bias_correction_ucl.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00514/Bias_correction_ucl.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col = 'Date', parse_dates= True)
class BikeSharing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class CarbonNanotubes(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'carbon_nanotubes.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ChallengerShuttleORing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'o-ring-erosion-only.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class BlogFeedback(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/BlogFeedback).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
file_name = 'blogData_train.csv'
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
download_unzip(url, dataset_path)
# Iterate all test csv and concatenate to one DataFrame
test_dfs = []
for fn in os.listdir(dataset_path):
if 'blogData_test' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
test_dfs.append(pd.read_csv(file_path, header=None))
df_test = pd.concat(test_dfs)
file_path = os.path.join(dataset_path, file_name)
df_train_valid = pd.read_csv(file_path, header=None)
y_columns = [280]
df_train_valid[y_columns[0]] = np.log(df_train_valid[y_columns[0]] + 0.01)
df_test[y_columns[0]] = np.log(df_test[y_columns[0]] + 0.01)
page_columns = list(range(50))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class CommunitiesCrime(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'communities.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,header=None)
class ConcreteSlumpTest(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Slump+Test).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'slump_test.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class PropulsionPlants (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI CBM Dataset.zip'
download_unzip(url, dataset_path)
filename = 'data.txt'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', header=None)  # data.txt in the UCI CBM archive is whitespace-separated with no header
class ConcreteCompressiveStrength (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Concrete_Data.xls'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class ComputerHardware (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Computer+Hardware).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'machine.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["vendor name", "Model Name", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "PRP", "ERP"])
class CommunitiesCrimeUnnormalized (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CommViolPredUnnormalizedData.txt'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00211/CommViolPredUnnormalizedData.txt'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, keep_default_na=False, header=None)
class CTSlices(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00206/slice_localization_data.zip'
download_unzip(url, dataset_path)
file_name = 'slice_localization_data.csv'
file_path = os.path.join(dataset_path, file_name)
df = pd.read_csv(file_path)
# No patient should be in both train and test set
df_train_valid = deepcopy(df.loc[df.patientId < 80, :]) # Pandas complains if it is a view
df_test = deepcopy(df.loc[df.patientId >= 80, :]) # - " -
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'patientId')
y_columns = ['reference']
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res = df_res.drop(columns='patientId')
self.x, self.y = xy_split(df_res, y_columns)
class ForecastingOrders(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Daily+Demand+Forecasting+Orders).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/Daily_Demand_Forecasting_Orders.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ForecastingStoreData(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Demand+Forecasting+for+a+store).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class FacebookComments(RegressionDataset):
"""
Predict the number of likes on posts from a collection of Facebook pages.
Every page has multiple posts, making the number of pages less than the samples
in the dataset (each sample is one post).
# Note
The provided test split has a relatively large discrepancy in terms
of distributions of the features and targets. Training and validation splits are
also made to ensure that the same page is not in both splits. This makes the distributions
of features in training and validation splits vary to a relatively large extent, possible
because the number of pages are not that many, while the features are many.
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+Comment+Volume+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00363/Dataset.zip'
download_unzip(url, dataset_path)
dataset_path = os.path.join(dataset_path, 'Dataset')
# The 5th variant has the most data
train_path = os.path.join(dataset_path, 'Training', 'Features_Variant_5.csv')
test_path = os.path.join(dataset_path, 'Testing', 'Features_TestSet.csv')
df_train_valid = pd.read_csv(train_path, header=None)
df_test = pd.read_csv(test_path, header=None)
y_columns = df_train_valid.columns[-1:]
# Page ID is not included, but can be derived. Page IDs can not be
# in both training and validation sets
page_columns = list(range(29))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class Facebookmetrics (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+metrics).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00368/Facebook_metrics.zip'
download_unzip(url, dataset_path)
filename = 'dataset_Facebook.csv'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';')
class ForestFires(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Forest+Fires).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'forestfires.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class GNFUV(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00452/GNFUV USV Dataset.zip'
download_unzip(url, dataset_path)
dfs = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
dfs.append(pd.read_csv(file_path, header=None))
class GNFUV_2(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data+Set+2).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00466/CNFUV_Datasets.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None))
class Greenhouse_Gas_Observing_Network (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Greenhouse+Gas+Observing+Network).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00328/ghg_data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None, sep='\s+'))
class Hungarian_Chickenpox_Cases (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Hungarian+Chickenpox+Cases).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00580/hungary_chickenpox.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, index_col='Date', parse_dates=True))
class IIWA14_R820_Gazebo_Dataset_10Trajectories(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/IIWA14-R820-Gazebo-Dataset-10Trajectories).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00574/IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, header=None)
class Metro_Interstate_Traffic_Volume(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Metro+Interstate+Traffic+Volume).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Metro_Interstate_Traffic_Volume.csv.gz'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00492/Metro_Interstate_Traffic_Volume.csv.gz'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_News_Final(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'News_Final.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/News_Final.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class Online_Video_Characteristics_and_Transcoding_Time(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Online+Video+Characteristics+and+Transcoding+Time+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00335/online_video_dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == 'README.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class OnlineNews(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00332/OnlineNewsPopularity.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, 'OnlineNewsPopularity', 'OnlineNewsPopularity.csv')
df = pd.read_csv(file_path, )
df.drop(columns=['url', ' timedelta'], inplace=True)
y_columns = [' shares']
df[y_columns[0]] = np.log(df[y_columns[0]])
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Parkinson(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/parkinsons).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path: str = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/' \
'parkinsons/telemonitoring/parkinsons_updrs.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path)
y_columns = ['motor_UPDRS', 'total_UPDRS']
df_train_valid = df[df['subject#'] <= 30]
df_test = deepcopy(df[df['subject#'] > 30])
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'subject#')
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res.drop(columns='subject#', inplace=True)
self.x, self.y = xy_split(df_res, y_columns)
class Physicochemical_Properties_of_Protein_Tertiary_Structure(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Physicochemical+Properties+of+Protein+Tertiary+Structure).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CASP.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class PPG_DaLiA_Data_Set(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/PPG-DaLiA).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00495/data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class QSAR_aquatic_toxicity(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+aquatic+toxicity).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'qsar_aquatic_toxicity.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00505/qsar_aquatic_toxicity.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', names=["TPSA(Tot)", "SAacc", "H-050", "MLOGP", "RDCHI", " GATS1p", "nN", "C-040", "quantitative response, LC50 [-LOG(mol/L)]"])
class QSAR_fish_bioconcentration_factor(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+fish+bioconcentration+factor+%28BCF%29).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00511/QSAR_fish_BCF.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == 'ECFP_1024_m0-2_b2_c.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class QSAR(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+fish+toxicity).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'qsar_fish_toxicity.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00504/qsar_fish_toxicity.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', names=[" CIC0", "SM1_Dz(Z)", " GATS1i", "NdsCH", " NdssC", "MLOGP", "quantitative response, LC50 [-LOG(mol/L)]"])
class PowerPlant(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00294/CCPP.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, 'CCPP', 'Folds5x2_pp.xlsx')
df = pd.read_excel(file_path)
y_columns = ['PE'] # Not clear if this is the aim of the dataset
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class ResidentialBuilding(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Real+estate+valuation+data+set).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Residential-Building-Data-Set.xlsx'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00437/Residential-Building-Data-Set.xlsx'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
y_columns = ['Y house price of unit area']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class RealEstate(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Real+estate+valuation+data+set).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Real estate valuation data set.xlsx'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00477/Real%20estate%20valuation%20data%20set.xlsx'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path, index_col='No')
class Real_time_Election_Results(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+fish+bioconcentration+factor+%28BCF%29).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00513/ElectionData2019.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if '.csv' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class Seoul_Bike_Sharing_Demand(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Seoul+Bike+Sharing+Demand).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'SeoulBikeData.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00560/SeoulBikeData.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class Servo(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Servo).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'servo.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/servo/servo.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["motor", "screw", " pgain", "vgain", "class"])
class SGEMM_GPU_kernel_performance(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/SGEMM+GPU+kernel+performance).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00440/sgemm_product_dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == 'Readme.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class Simulated_data_for_survival_modelling(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Simulated+data+for+survival+modelling).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00581/MLtoSurvival-Data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == '.gitkeep':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class SkillCraft1(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/SkillCraft1+Master+Table+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'SkillCraft1_Dataset.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00272/SkillCraft1_Dataset.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class SML2010(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/SML2010).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00274/NEW-DATA.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == '.gitkeep':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep=r'\s+'))
class Solar_Flare(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Solar+Flare).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'flare.data1'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/solar-flare/flare.data1'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df1 = pd.read_csv(file_path, header=None, skiprows=[0], sep=r'\s+')
filename = 'flare.data2'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/solar-flare/flare.data2'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df2 = pd.read_csv(file_path, header=None, skiprows=[0], sep=r'\s+')
df = | pd.merge(df1, df2) | pandas.merge |
#!/usr/bin/python3.7
#Dependencies
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.optimize import minimize_scalar
import scipy.special as sp
import matplotlib.pyplot as plt
from scipy.stats import linregress
from scipy.stats import t
from scipy import integrate
import derivative
#-----------------------------------------------------------------------------------------------------------
class DataExtraction(object):
"""
Class that manipulates raw data to create lists and Data Frames
that will be used to compute the Activation Energy.
"""
def __init__(self):
"""
Constructor.
Parameters: None
Notes: It only defines variables.
"""
self.DFlis = [] #list of DataFrames containing data
self.Beta = [] #list of heating rates
self.BetaCC = [] #list of correlation coefficient for T vs t
self.files = [] #list of files containing raw data
self.da_dt = [] #list of experimental conversion rates
self.T = [] #list of experimental temperature
self.t = []                   #list of experimental time
self.TempIsoDF = pd.DataFrame() #Isoconversional temperature DataFrame
self.timeIsoDF = pd.DataFrame() #Isoconversional time DataFrame
self.diffIsoDF = pd.DataFrame() #Isoconversional conversion rate DataFrame
self.TempAdvIsoDF = pd.DataFrame() #Advanced isoconversional temperature DataFrame
self.timeAdvIsoDF = pd.DataFrame() #Advanced isoconversional time DataFrame
self.alpha = [] #list of experimental conversion
self.d_a = 0.00001 #default value of alpha step for aVy method
#-----------------------------------------------------------------------------------------------------------
def set_data(self, filelist):
"""
Method to establish the file list for the extractor.
Parameters: filelist : list object containing the paths
of the files to be used.
Notes: The paths must be sorted in ascending heating
rate order.
"""
print("Files to be used: \n{} ".format(filelist))
self.files = filelist
#-----------------------------------------------------------------------------------------------------------
def data_extraction(self,encoding='utf8'):
"""
Method to extract the data contained in the files into a list of DataFrames.
Adds three columns: one corresponding to the absolute temperature, another
corresponding to the conversion ('alpha') and a third for d(alpha)/dt.
Also computes the heating rate ('Beta') and its correlation coefficient.
Parameters: encoding: Any text encoding accepted by the pandas readers, including but not limited
to 'utf8', 'utf16' and 'latin1'. For more information on the standard Python encodings see:
(https://docs.python.org/3/library/codecs.html#standard-encodings)
"""
BetaCorrCoeff = self.BetaCC
DFlis = self.DFlis
Beta = self.Beta
filelist = self.files
alpha = self.alpha
da_dt = self.da_dt
T = self.T
t = self.t
# Read the data from each csv
# Create the Dataframe of each experiment
# Add three columns (T,alpha,(da/dt))
# Compute the linear regression of T vs t
for item in filelist:
try:
DF = pd.read_table(item, sep = '\t', encoding = encoding)
DF['Temperature [K]'] = DF[DF.columns[1]] + 273.15
DF[r'$\alpha$'] = (DF[DF.columns[2]][0]-DF[DF.columns[2]])/(DF[DF.columns[2]][0]-DF[DF.columns[2]][DF.shape[0]-1])
dadt = derivative.dxdt(DF[r'$\alpha$'],DF[DF.columns[0]],kind='spline',s=0.01,order=5)
DF[r'$d\alpha/dt$'] = DF[DF.columns[0]]
DF[r'$d\alpha/dt$'] = dadt
except IndexError:
DF = pd.read_table(item, sep = ',', encoding = encoding)
DF['Temperature [K]'] = DF[DF.columns[1]] + 273.15
DF[r'$\alpha$'] = (DF[DF.columns[2]][0]-DF[DF.columns[2]])/(DF[DF.columns[2]][0]-DF[DF.columns[2]][DF.shape[0]-1])
dadt = derivative.dxdt(DF[r'$\alpha$'],DF[DF.columns[0]],kind='spline',s=0.01,order=5)
DF[r'$d\alpha/dt$'] = DF[DF.columns[0]]
DF[r'$d\alpha/dt$'] = dadt
LR = linregress(DF[DF.columns[0]],DF[DF.columns[3]])
BetaCorrCoeff.append(LR.rvalue)
Beta.append(LR.slope)
DFlis.append(DF)
# Create an array of conversion values (alpha) sorted in ascending order, along with arrays
# for the temperature, time and conversion rate corresponding to those conversion values
for i in range(len(DFlis)):
a = [DFlis[i][r'$\alpha$'].values[0]]
Temp = [DFlis[i]['Temperature [K]'].values[0]]
time = [DFlis[i][DFlis[i].columns[0]].values[0]]
diff = [DFlis[i][r'$d\alpha/dt$'].values[1]]
for j in range(len(DFlis[i][r'$\alpha$'].values)):
if DFlis[i][r'$\alpha$'].values[j] == a[-1]:
pass
elif DFlis[i][r'$\alpha$'].values[j] > a[-1]:
a.append(DFlis[i][r'$\alpha$'].values[j])
Temp.append(DFlis[i]['Temperature [K]'].values[j])
time.append(DFlis[i][DFlis[i].columns[0]].values[j])
diff.append(DFlis[i][r'$d\alpha/dt$'].values[j])
else:
pass
alpha.append(np.array(a))
T.append(np.array(Temp))
t.append(np.array(time))
da_dt.append(np.array(diff))
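# Worked illustration (added comment) of the monotonic filter above: if an experiment's
# conversion values were [0.0, 0.1, 0.1, 0.2, 0.15, 0.3], only the strictly increasing
# subsequence [0.0, 0.1, 0.2, 0.3] is kept, together with the corresponding T, t and da/dt entries.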
self.BetaCC = BetaCorrCoeff
self.DFlis = DFlis
self.Beta = Beta
self.da_dt = da_dt
self.T = T
self.t = t
self.alpha = alpha
#-----------------------------------------------------------------------------------------------------------
def get_beta(self):
"""
Parameters: None
Returns: list object containing the experimental heating rates, sorted
in ascending order, obtained from a linear regression of T vs t.
"""
return self.Beta
#-----------------------------------------------------------------------------------------------------------
def get_betaCC(self):
"""
Parameters: None
Returns: list object containing the experimental T vs t correlation coefficient
obtained from a linear regression, sorted in correspondence with the
heating rate list (attribute Beta).
"""
return self.BetaCC
#-----------------------------------------------------------------------------------------------------------
def get_DFlis(self):
"""
Parameters: None
Returns: list object containing the DataFrames with the experimental data, sorted
in correspondence with the heating rate list (attribute Beta).
"""
return self.DFlis
#-----------------------------------------------------------------------------------------------------------
def isoconversional(self):
"""
Isoconversional DataFrames building method for the Friedman, KAS, OFW and Vyazovkin methods.
The isoconversional values for T, t and da/dt are obtained by interpolation.
Parameters: None
Returns: None
Notes: This method assigns values to the attributes: TempIsoDF, timeIsoDF and diffIsoDF
"""
alpha = self.alpha
da_dt = self.da_dt
T = self.T
t = self.t
DFlis = self.DFlis
TempIsoDF = pd.DataFrame()
timeIsoDF = pd.DataFrame()
diffIsoDF = pd.DataFrame()
Beta = self.Beta
# Take the experimental data set with the fewest data points (alps), so that the interpolation is done using the
# data sets that contain more experimental information.
# Create the interpolation functions and evaluate them over the conversion values of the latter set (alps).
# Create the isoconversional DataFrames with the conversion values (alps) as index and the
# interpolation values as columns corresponding to their experimental heating rates.
alps = np.array(alpha[-1])
TempIsoDF['HR '+str(np.round(Beta[-1], decimals = 1)) + ' K/min'] = np.round(T[-1], decimals = 4)
timeIsoDF['HR '+str(np.round(Beta[-1], decimals = 1)) + ' K/min'] = np.round(t[-1], decimals = 4)
diffIsoDF['HR '+str(np.round(Beta[-1], decimals = 1)) + ' K/min'] = np.round(da_dt[-1], decimals = 4)
for i in range(len(Beta)-1):
inter_func = interp1d(alpha[i],
t[i],
kind='cubic',
bounds_error=False,
fill_value="extrapolate")
timeIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func(alps), decimals = 4)
inter_func2 = interp1d(alpha[i],
T[i],
kind='cubic',
bounds_error=False,
fill_value="extrapolate")
TempIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func2(alps), decimals = 4)
inter_func3 = interp1d(alpha[i],
da_dt[i],
kind='cubic',
bounds_error=False,
fill_value="extrapolate")
diffIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func3(alps), decimals = 4)
colnames = TempIsoDF.columns.tolist()
colnames = colnames[1:] + colnames[:1]
TempIsoDF.index = alpha[-1]
TempIsoDF = TempIsoDF[colnames]
timeIsoDF.index = alpha[-1]
timeIsoDF = timeIsoDF[colnames]
diffIsoDF.index = alpha[-1]
diffIsoDF = diffIsoDF[colnames]
self.TempIsoDF = TempIsoDF
self.timeIsoDF = timeIsoDF
self.diffIsoDF = diffIsoDF
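# Illustration (added comment): after isoconversional() runs, each row of TempIsoDF corresponds
# to one conversion level and each column to one heating rate, so TempIsoDF.iloc[k] lists the
# interpolated temperatures at which every experiment reached the k-th conversion value in the index.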
#-----------------------------------------------------------------------------------------------------------
def get_TempIsoDF(self):
"""
Parameters: None
Returns: DataFrame of isoconversional temperatures. The index is the set of conversion
values from the experiment with the fewest data points (which corresponds to the
smallest heating rate). The columns are isoconversional temperatures, sorted in
ascending heating rate order from left to right.
"""
return self.TempIsoDF
#-----------------------------------------------------------------------------------------------------------
def get_timeIsoDF(self):
"""
Parameters: None
Returns: DataFrame of isoconversional times. The index is the set of conversion values
from the experiment with the fewest data points (which corresponds to the smallest
heating rate). The columns are isoconversional times, sorted in ascending heating
rate order from left to right.
"""
return self.timeIsoDF
#-----------------------------------------------------------------------------------------------------------
def get_diffIsoDF(self):
"""
Parameters: None
Returns: DataFrame of isoconversional conversion rates. The index is the set of conversion
values from the experiment with the fewest data points (which corresponds to the smallest
heating rate). The columns are isoconversional conversion rates, sorted in ascending
heating rate order from left to right.
"""
return self.diffIsoDF
#-----------------------------------------------------------------------------------------------------------
def adv_isoconversional(self, method='points', N = 1000, d_a = 0.001):
"""
Isoconversional DataFrames building method for the advanced Vyazovkin method. The isoconversional
values for T and t are obtained by interpolation.
Parameters: method : String. Value can be either 'points' or 'interval'. 'points' is the
default value.
N : Int. Number of conversion points if the 'points' method is given.
1000 is the default value.
d_a : Float. Size of the interval between conversion values if the method
'interval' is given. 0.001 is the default value.
Returns: None
Notes: This method assigns values to the attributes: TempAdvIsoDF, timeAdvIsoDF and d_a
"""
TempAdvIsoDF = pd.DataFrame()
timeAdvIsoDF = | pd.DataFrame() | pandas.DataFrame |
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.models import User, auth
from django.contrib.auth import get_user_model
from django.db.models import Sum, Q, F
from django.contrib import messages
from django.views.generic import FormView
from rest_framework.views import APIView
from rest_framework.response import Response
from sqlalchemy import create_engine
from .mixins import AjaxFormMixin
from .forms import Item_Form, Day_Form, New_Item_Form
from .models import Item, New_item
from . import connectpsql
from .filters import ItemFilters
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from numpy import exp, array, random, dot
from sklearn.preprocessing import MinMaxScaler, scale
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras import models
from statsmodels.tsa.arima_model import ARIMA, ARMA
from statsmodels.tsa.vector_ar.var_model import VAR
from statsmodels.graphics.tsaplots import plot_pacf
from pmdarima.arima.utils import ndiffs
from TFANN import ANNR
from pandas.plotting import autocorrelation_plot
from flask import Flask, render_template
from django.views.decorators.csrf import csrf_exempt, csrf_protect
# point to CustomUser table
User = get_user_model()
# fill up the empty rows with zero
def insertZero(costList):
# daily total
dfDay = costList.cost.resample('D').sum()
today = datetime.datetime.today() #yyyy-mm-dd
last_date = dfDay.iloc[[-1]].index # find the last date of dfDay
# add zero until today
while last_date < today - datetime.timedelta(days=1):
last_date += datetime.timedelta(days=1) # add 1 day
new_row = pd.Series(data={" ": 0}, index=last_date) # create a new row
dfDay = dfDay.append(new_row, ignore_index=False) # insert into dfDay
dfDay = dfDay.replace(to_replace=np.nan, value=0)
return round(dfDay, 2)
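# Illustration (added comment, hypothetical data): if costList holds costs on 2021-01-01 and
# 2021-01-03 only, insertZero returns a daily series starting 2021-01-01 with zeros filled in
# for 2021-01-02 and for every later day through the current date.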
# predicting
def processPrediction(dfDay, history, prediction_days):
last_date = dfDay.iloc[[-1]].index + datetime.timedelta(days=1)
## original days list
dfOrginal = pd.DataFrame(columns=["date", "cost"])
# dfOrginal = pd.DataFrame(columns=["cost"])
dfOrginal.date = dfDay.index
dfOrginal.cost = dfDay.tolist()
# dfOrginal.set_index("date", inplace=True)
## predict days list
dfPredict = pd.DataFrame(columns=["date", "cost"])
dfPredict.date = pd.date_range(last_date[0], periods=prediction_days, freq="D")
dfPredict.cost = history[-prediction_days:]
# dfPredict.set_index("date", inplace=True)
## Combine two data lists
# dfOrginal = dfOrginal.append(dfPredict)
# plt.plot(dfOrginal.index, dfOrginal)
# plt.show( )
return dfOrginal, dfPredict
# inverse the difference in the dataset
def inverse_diffference(history, predict, interval=1):
# print(predict, " + ", history[-interval])
return predict + history[-interval]
# find the difference in the dataset
def difference(df, interval=1):
diff_list = []
for i in range(interval, len(df)):
value = df[i] - df[i - interval]
# print("i = ", i, " i - interval = ", (i -interval))
# print(df[i], " - ", df[i - interval], " = ", value)
diff_list.append(value)
return array(diff_list)
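# Round-trip illustration (added comment): for series = [3.0, 5.0, 9.0],
# difference(series) -> array([2.0, 4.0]); the last value is recovered with
# inverse_diffference(series[:2], difference(series)[-1]) -> 9.0 (i.e. 4.0 + 5.0).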
# convert to number
def convertToNum(df):
df = df.replace(to_replace="Income", value=0)
df = df.replace(to_replace="Expense", value=1)
df = df.replace(to_replace="Salaries and wages", value=10)
df = df.replace(to_replace="Utility expenses", value=11)
df = df.replace(to_replace="Administration expenses", value=12)
df = df.replace(to_replace="Finance costs", value=13)
df = df.replace(to_replace="Depreciation", value=14)
df = df.replace(to_replace="Impairment losses", value=15)
df = df.replace(to_replace="Food", value=16)
df = df.replace(to_replace="Others", value=17)
return df
# processing data from psql
def processingDataset(dataset, predict_type):
pd.set_option('display.max_rows', None)
# Create a DataFrame
df = pd.DataFrame(columns=["date", "itemType", "costType", "cost"])
df.date = dataset.date.tolist()
df.itemType = dataset.item_type.tolist()
df.costType = dataset.cost_type.tolist()
df.cost = dataset.cost.tolist()
# Set date to be df.index
df.set_index("date", inplace=True)
df.index = pd.to_datetime(df.index)
# Convert string to num
df = convertToNum(df)
# today = today.replace(hour=0, minute=0, second=0, microsecond=0)
# dfDay.index[-1] #yyyy-mm-dd hh:mm:ss
# Filter costType
if predict_type == "income":
incomeList = df[df.costType==0]
# incomeList = incomeList.drop(columns="costType")
dfDay = insertZero(incomeList)
return dfDay
elif predict_type == "expense":
expensesList = df[df.costType==1]
# expensesList = expensesList.drop(columns="costType")
dfDay = insertZero(expensesList)
return dfDay
## Sort by date, item type, and cost type
# dfGroup = expensesList.groupby(["date", "itemType"]).sum()
## Weekly total
# dfWeek = expensesList.cost.resample('W').sum()
## Monthly total
# dfMonth = expensesList.cost.resample('M').sum()
# dfWeekDay = pd.merge(dfWeek, dfDay, how="outer", on="date", sort=True, suffixes=("_week", "_day"))
# dfMonth = pd.merge(dfMonth, dfWeekDay, how="outer", on="date", sort=True)
# dfMonth = dfMonth.replace(to_replace=np.nan, value=-1)
# prediction function
def prediction(days, predict_type, username):
# Connect to psql server
engine = create_engine(connectpsql.psql)
sql_command = "SELECT date, item_type, cost_type, cost FROM budget_new_item i INNER JOIN user_customuser u ON i.username_id = u.id WHERE u.username = '" + str(username) + "' ORDER BY i.date"
# Read dataset from psql server
dataset = | pd.read_sql(sql_command, engine, parse_dates=["date"]) | pandas.read_sql |
#!/usr/bin/env python
import pandas as pd
from collections import namedtuple
from dataclasses import dataclass
from dataclasses import asdict
Author = namedtuple('Author', 'authorName journal date')
authorList = []
authorList.append(Author('<NAME>','Journal of Witchcraft',2018))
authorList.append(Author('<NAME>','Duck Duck Goose Transactions',2019))
authorList.append(Author('<NAME>','Journal of Ambiguity',2017))
df = | pd.DataFrame.from_records(authorList,columns=Author._fields) | pandas.DataFrame.from_records |
"""
Microsoft Learning to Rank Dataset:
https://www.microsoft.com/en-us/research/project/mslr/
"""
import datetime
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
def get_time():
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
class DataLoader:
def __init__(self, path):
"""
:param path: str
"""
self.path = path
self.pickle_path = path[:-3] + 'pkl'
self.df = None
self.num_pairs = None
self.num_sessions = None
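# Example (added comment): DataLoader('Fold1/train.txt').pickle_path == 'Fold1/train.pkl'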
def get_num_pairs(self):
if self.num_pairs is not None:
return self.num_pairs
self.num_pairs = 0
for _, Y in self.generate_batch_per_query(self.df):
Y = Y.reshape(-1, 1)
pairs = Y - Y.T
pos_pairs = np.sum(pairs > 0, (0, 1))
neg_pairs = np.sum(pairs < 0, (0, 1))
assert pos_pairs == neg_pairs
self.num_pairs += pos_pairs + neg_pairs
return self.num_pairs
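# Worked example (added comment): for a single query with relevance labels Y = [2, 1, 0],
# the matrix Y - Y.T contains 3 positive and 3 negative entries, so that query
# contributes 6 ordered pairs to num_pairs.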
def get_num_sessions(self):
return self.num_sessions
def _load_mslr(self):
print(get_time(), "load file from {}".format(self.path))
df = | pd.read_csv(self.path, sep=" ", header=None) | pandas.read_csv |
#!/usr/bin/env python3
"""
September 2020
<EMAIL>
flagellin.py
formerly: pipeline_flagellin_TLR5.py
The D1 subunit of bacterial flagellin has the potential to act as an immune adjuvant via
direct interaction with Toll-Like Receptor 5. Structural studies suggest an arginine at reference position
R[89] in Bacillus subtilis is conserved among TLR5-activating flagellins.
Based on multiple sequence alignment, the molecular pattern used in this study to
identify TLR5-activating (hereafter TLR5+) flagellin is '([LVIM].R[MIALV]..[LI])'.
This pipeline permits the interrogation of raw metagenomic .fastq data files, quantifying detectable bacterial flagellins
and scoring whether they match TLR5+ reference genes.
The primary motif responsible for the D1 flagellin subunit-TLR5 interface is found within
the Bacterial flagellin N-terminal helical region (https://pfam.xfam.org/family/PF00669).
As of September 2020, the EMBL-EBI Pfam database contains 73,795 protein sequences from NCBI annotated
as members of this protein family. These sequences were downloaded to /data/PF00669_ncbi.txt.
He et al. 2020 also constructed a taxonomically annotated database of flagellin sequences from
Protein Family 00669 as well as Protein Family 00700 (Bacterial flagellin C-terminal helical region).
Version 1 of our flagellin database is the intersection of sequences in PF00669_ncbi.txt and
the Flagellin DB.
"""
import numpy as np
import os
import pandas as pd
import re
from Bio import SeqIO
def safe_re_search(s, pattern, group = 0):
r = re.search(pattern =pattern , string =s)
if r is not None:
return r.groups()[group]
else:
return None
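# Illustration (added comment; toy sequences rather than real flagellin entries):
#   safe_re_search('MKLQRIRELSV', '([LVIM].R[MIALV]..[LI])') -> 'LQRIREL'
#   safe_re_search('MKAAAAASV', '([LVIM].R[MIALV]..[LI])') -> None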
def construct_reference_flagellin_dataframe(pfam_ncbi_fasta_filename,
hu_accession_list_filename,
hu_table_filename,
tlr5_pos_pattern = '([LVIM].R[MIALV]..[LI])',
base_dir = '/Users/kmayerbl/TLR/'):
"""
Constructs a DataFrame that will be used later on to score diamond results
Parameters
----------
pfam_ncbi_fasta_filename : str
Default 'inputs/PF00669_ncbi.txt'
hu_accession_list_filename : str
Default 'inputs/hu_all_ncbi.txt'
hu_table_filename : str
Default 'inputs/hu_table.csv'
tlr5_pos_pattern : str
Default '([LVIM].R[MIALV]..[LI])'
base_dir : str
Returns
-------
df : Pandas DataFrame
"""
record_dict = SeqIO.to_dict(SeqIO.parse(pfam_ncbi_fasta_filename, 'fasta'))
mapr = {x.strip().split("/")[0]:x for x in record_dict.keys()}
ncbi_in_pfam = [x.strip().split("/")[0] for x in record_dict.keys()]
with open(hu_accession_list_filename, 'r') as f:
accessions = f.readlines()
hu_ncbi_all = [x.strip() for x in accessions]
set(hu_ncbi_all).intersection(set(ncbi_in_pfam))
acc = list(set(hu_ncbi_all).intersection(set(ncbi_in_pfam)))
df = | pd.DataFrame({"Accession": acc}) | pandas.DataFrame |
import os
os.environ['NUMEXPR_NUM_THREADS'] = '1'
import pandas as pd
import streamlit as st
import numexpr
numexpr.set_num_threads(1)
from models.first_model import read_data, split_data, train_model
df = read_data()
X_train, X_test, y_train, y_test = split_data(df)
clf = train_model(X_train, X_test, y_train, y_test)
def predict():
dict_slides.update(dict_selects)
print(dict_slides)
df = | pd.DataFrame([dict_slides]) | pandas.DataFrame |
import dataset
import librosa
from torch.utils.data import DataLoader, random_split
import torch
import torch.nn.functional as F
from utils import *
import torchvision.models as models
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
import argparse
import yaml
from pathlib import Path
import pandas as pd
import time
#from models import *
#from filter import *
from networks import *
from torch.autograd import Variable
import glob
from mel2wav.modules import MelGAN_Generator, Audio2Mel
LongTensor = torch.cuda.LongTensor
FloatTensor = torch.cuda.FloatTensor
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type = str, default = '0')
parser.add_argument("--experiment_path", type = str, default = None)
parser.add_argument("--save_specs", type = bool, default = False)
parser.add_argument("--save_audio", type = bool, default = False)
parser.add_argument("--filter_receptive_field", type = int, default = 3)
parser.add_argument("--n_mel_channels", type = int, default = 80)
parser.add_argument("--ngf", type = int, default = 32)
parser.add_argument("--n_residual_layers", type = int, default = 3)
# Data parameters
parser.add_argument("--sampling_rate", type = int, default = 8000)
parser.add_argument("--segment_length", type = int, default = 8192)
parser.add_argument("--batch_size", type = int, default = 8)
args = parser.parse_args()
return args
def main():
args = parse_args()
runs = sorted(listdirs(args.experiment_path))
num_runs = len(runs)
# Some hyper parameters
num_genders = 2
num_digits = 10
device = 'cuda:' + args.device
print(device)
# Meta data and list of data files
annotation_file = '/home/edvinli/thesis_spring_2020/audio_mnist/audioMNIST_meta.json'
train_file_index = librosa.util.find_files('/home/edvinli/thesis_spring_2020/audio_mnist/')
split_ratio = 5
# Build indices for the data
file_index, annotation_index_gender, annotation_index_digit, annotation_index_speaker_id = dataset.build_annotation_index(
train_file_index, annotation_file, balanced_genders = False)
test_annotation_index, train_annotation_index, test_ids, train_ids = dataset.balanced_annotation_split(file_index, annotation_index_gender, annotation_index_digit, annotation_index_speaker_id, split_ratio)
# Create the dataset
test_data = dataset.AnnotatedAudioDataset(
test_annotation_index, args.sampling_rate, args.segment_length
)
n_test = test_data.__len__()
if args.save_audio:
test_loader = DataLoader(test_data, batch_size = 25, num_workers = 1)
else:
test_loader = DataLoader(test_data, batch_size = args.batch_size, num_workers = 1)
# Set up models that are not trained
fft = Audio2Mel(sampling_rate = args.sampling_rate)
Mel2Audio = MelGAN_Generator(args.n_mel_channels, args.ngf, args.n_residual_layers).to(device)
utility_netD = load_modified_AlexNet(num_digits).to(device)
fixed_netD = load_modified_AlexNet(num_genders).to(device)
utility_audio_net = AudioNet(num_digits).to(device)
privacy_audio_net = AudioNet(num_genders).to(device)
spec_FID_net = FID_AlexNet(num_digits).to(device)
audio_FID_net = AudioNet(num_digits).to(device)
# Pretrained Mel spectrogram inversion and digit classification
Mel2Audio.load_state_dict(torch.load('mel2wav/best_netG_epoch_2120.pt'))
utility_netD.load_state_dict(torch.load('fixed_classifier_checkpoints/best_digit_alexnet_spectrograms_epoch_26.pt'))
fixed_netD.load_state_dict(torch.load('fixed_classifier_checkpoints/best_gender_alexnet_epoch_29.pt'))
utility_audio_net.load_state_dict(torch.load('fixed_classifier_checkpoints/audio_digit_net_early_stop_epoch_26.pt'))
privacy_audio_net.load_state_dict(torch.load('fixed_classifier_checkpoints/audio_gender_net_early_stop_epoch_36.pt'))
utility_netD.eval()
fixed_netD.eval()
utility_audio_net.eval()
privacy_audio_net.eval()
# Pretrained FID loading
spec_FID_net.load_state_dict(torch.load('fixed_classifier_checkpoints/best_digit_alexnet_spectrograms_epoch_26.pt'))
audio_FID_net.load_state_dict(torch.load('fixed_classifier_checkpoints/audio_digit_net_early_stop_epoch_26.pt'))
# Initialize arrays for accuracies and fid scores
spec_digit_accuracy_F = []
spec_original_gender_accuracy_F = []
spec_digit_accuracy_G = []
spec_original_gender_accuracy_G = []
spec_sampled_gender_accuracy_G = []
audio_digit_accuracy_F = []
audio_original_gender_accuracy_F = []
audio_digit_accuracy_G = []
audio_original_gender_accuracy_G = []
audio_sampled_gender_accuracy_G = []
fid_spec_F = []
fid_spec_G = []
fid_audio_F = []
fid_audio_G = []
fid_inverted_audio = []
for i in range(num_runs):
run_path = os.path.join(args.experiment_path, runs[i])
result_dir = os.path.join(run_path, 'results')
audio_result_dir = os.path.join(result_dir, 'audio')
spec_result_dir = os.path.join(result_dir, 'spectrograms')
checkpoint_dir = os.path.join(run_path, 'checkpoints')
# os.mkdir(result_dir)
# os.mkdir(audio_result_dir)
# os.mkdir(spec_result_dir)
# Set up and load trained model
netF = UNetFilter(1, 1, chs=[8, 16, 32, 64, 128], kernel_size = args.filter_receptive_field, image_width=32, image_height=80, noise_dim=10, nb_classes=2, embedding_dim=16, use_cond = False).to(device)
netG = UNetFilter(1, 1, chs=[8, 16, 32, 64, 128], kernel_size = args.filter_receptive_field, image_width=32, image_height=80, noise_dim=10, nb_classes=2, embedding_dim=16, use_cond = True).to(device)
netF.load_state_dict(torch.load(os.path.join(checkpoint_dir, 'netF_latest_epoch_1000.pt')))
netG.load_state_dict(torch.load(os.path.join(checkpoint_dir, 'netG_latest_epoch_1000.pt')))
spec_correct_digit_F = 0
spec_correct_original_gender_F = 0
spec_correct_digit_G = 0
spec_correct_original_gender_G = 0
spec_correct_sampled_gender_G = 0
audio_correct_digit_F = 0
audio_correct_original_gender_F = 0
audio_correct_digit_G = 0
audio_correct_original_gender_G = 0
audio_correct_sampled_gender_G = 0
acts_real_spec = []
acts_fake_spec_F = []
acts_fake_spec_G = []
acts_real_audio = []
acts_fake_audio_F = []
acts_fake_audio_G = []
acts_inverted_audio = []
for j, (x, gender, digit, speaker_id) in enumerate(test_loader):
x = torch.unsqueeze(x,1)
spectrograms = fft(x).detach()
spectrograms, means, stds = preprocess_spectrograms(spectrograms)
spectrograms = torch.unsqueeze(spectrograms,1).to(device)
gender = gender.to(device)
digit = digit.to(device)
# --------------------------
# Spectrogram calculations
# --------------------------
z1 = torch.randn(spectrograms.shape[0], 10).to(device)
filter_mel = netF(spectrograms,z1, gender.long())
z2 = torch.randn(filter_mel.shape[0], 10).to(device)
gen_secret = Variable(LongTensor(np.random.choice([0.0, 1.0], filter_mel.shape[0]))).to(device)
gen_mel = netG(filter_mel,z2,gen_secret)
spec_pred_digit_F = utility_netD(filter_mel)
spec_pred_gender_F = fixed_netD(filter_mel)
spec_pred_digit_G = utility_netD(gen_mel)
spec_pred_gender_G = fixed_netD(gen_mel)
# Calculate digit accuracy for fixed net on F and G outputs
pred_digit_F = torch.argmax(spec_pred_digit_F.data,1)
spec_correct_digit_F += (pred_digit_F == digit).sum().item()
pred_digit_G = torch.argmax(spec_pred_digit_G.data,1)
spec_correct_digit_G += (pred_digit_G == digit).sum().item()
# Calculate gender accuracy for fixed net on F and G outputs
pred_gender_F = torch.argmax(spec_pred_gender_F.data,1)
spec_correct_original_gender_F += (pred_gender_F == gender.long()).sum().item()
pred_gender_G = torch.argmax(spec_pred_gender_G.data,1)
spec_correct_original_gender_G += (pred_gender_G == gender.long()).sum().item()
spec_correct_sampled_gender_G += (pred_gender_G == gen_secret).sum().item()
# Calculate FID on transformed spectrograms
# acts1_tmp = spec_FID_net(spectrograms)
# acts2_tmp = spec_FID_net(filter_mel)
# acts3_tmp = spec_FID_net(gen_mel)
#
# acts_real_spec.append(np.squeeze(acts1_tmp.detach().cpu().numpy()))
# acts_fake_spec_F.append(np.squeeze(acts2_tmp.detach().cpu().numpy()))
# acts_fake_spec_G.append(np.squeeze(acts3_tmp.detach().cpu().numpy()))
# Save spectrogram samples
if args.save_specs:
z1 = torch.randn(spectrograms.shape[0], 10).to(device)
filtered = netF(spectrograms,z1,gender.long()).detach()
z2 = torch.randn(spectrograms.shape[0], 10).to(device)
male = Variable(LongTensor(spectrograms.size(0)).fill_(1.0), requires_grad=False).to(device)
female = Variable(LongTensor(spectrograms.size(0)).fill_(0.0), requires_grad=False).to(device)
generated_male = netG(filtered, z2, male).detach()
generated_female = netG(filtered, z2, female).detach()
filtered = torch.squeeze(filtered,1).to(device) * 3 * stds.to(device) + means.to(device)
generated_male = generated_male.to(device) * 3 * stds.to(device) + means.to(device)
generated_female = generated_female.to(device) * 3 * stds.to(device) + means.to(device)
spectrograms = spectrograms.to(device) * 3 * stds.to(device) + means.to(device)
if gender == 0:
gender_title = 'female'
else:
gender_title = 'male'
orig_title = 'Original spectrogram - Gender: {} - Digit: {}'.format(gender_title, digit.item())
male_title = 'Sampled gender: male '
female_title = 'Sampled gender: female'
filtered_title = 'Filtered spectrogram'
f_name_original = os.path.join(spec_result_dir, 'speaker_{}_digit_{}_original.png'.format(
speaker_id.item(), digit.item()
))
f_name_male = os.path.join(spec_result_dir, 'speaker_{}_digit_{}_male.png'.format(
speaker_id.item(), digit.item()
))
f_name_female = os.path.join(spec_result_dir, 'speaker_{}_digit_{}_female.png'.format(
speaker_id.item(), digit.item()
))
f_name_filtered = os.path.join(spec_result_dir, 'speaker_{}_digit_{}_filtered.png'.format(speaker_id.item(), digit.item()
))
save_spec_plot(f_name_original, spectrograms, orig_title)
save_spec_plot(f_name_male, generated_male, male_title)
save_spec_plot(f_name_female, generated_female, female_title)
save_spec_plot(f_name_filtered, filtered, filtered_title)
# --------------------------
# Audio calculations
# --------------------------
# Denormalize spectrograms before inversion
filter_mel = torch.squeeze(filter_mel,1).to(device) * 3 * stds.to(device) + means.to(device)
gen_mel = torch.squeeze(gen_mel,1).to(device) * 3 * stds.to(device) + means.to(device)
spectrograms = torch.squeeze(spectrograms,1).to(device) * 3 * stds.to(device) + means.to(device)
# Invert spectrograms using MelGAN
original_audio = Mel2Audio(spectrograms)
filter_audio = Mel2Audio(filter_mel)
gen_audio = Mel2Audio(gen_mel)
# Classify transformed audio
audio_pred_digit_F, _ = utility_audio_net(filter_audio)
audio_pred_gender_F, _ = privacy_audio_net(filter_audio)
audio_pred_digit_G, _ = utility_audio_net(gen_audio)
audio_pred_gender_G, _ = privacy_audio_net(gen_audio)
pred_digit_F = torch.argmax(audio_pred_digit_F.data, 1)
pred_gender_F = torch.argmax(audio_pred_gender_F.data, 1)
pred_digit_G = torch.argmax(audio_pred_digit_G.data, 1)
pred_gender_G = torch.argmax(audio_pred_gender_G.data, 1)
audio_correct_digit_F += (pred_digit_F == digit).sum().item()
audio_correct_original_gender_F += (pred_gender_F == gender.long()).sum().item()
audio_correct_digit_G += (pred_digit_G == digit).sum().item()
audio_correct_sampled_gender_G += (pred_gender_G == gen_secret).sum().item()
audio_correct_original_gender_G += (pred_gender_G == gender.long()).sum().item()
# Compute activations for FID calculations for the transformed audio
_, acts1_tmp = audio_FID_net(x.to(device))
_, acts2_tmp = audio_FID_net(filter_audio.to(device))
_, acts3_tmp = audio_FID_net(gen_audio.to(device))
_, acts4_tmp = audio_FID_net(original_audio.to(device))
acts1_tmp = torch.flatten(acts1_tmp,1)
acts2_tmp = torch.flatten(acts2_tmp,1)
acts3_tmp = torch.flatten(acts3_tmp,1)
acts4_tmp = torch.flatten(acts4_tmp,1)
acts_real_audio.append(np.squeeze(acts1_tmp.detach().cpu().numpy()))
acts_fake_audio_F.append(np.squeeze(acts2_tmp.detach().cpu().numpy()))
acts_fake_audio_G.append(np.squeeze(acts3_tmp.detach().cpu().numpy()))
acts_inverted_audio.append(np.squeeze(acts4_tmp.detach().cpu().numpy()))
# --------------------------
# Save audio sample
# --------------------------
if args.save_audio:
if j % 2 == 0:
original_audio_sample = torch.squeeze(original_audio[0]).detach().cpu()
gen_audio_sample = torch.squeeze(gen_audio[0]).detach().cpu()
speaker_id_sample = speaker_id[0].detach().cpu()
digit_sample = digit[0].detach().cpu()
gender_sample = gender[0].detach().cpu()
gen_secret_sample = gen_secret[0].detach().cpu()
f_name_orig_audio = os.path.join(audio_result_dir,'speaker_{}_digit_{}_original_inverted.wav'.format(speaker_id_sample, digit_sample, gender_sample, gen_secret_sample))
f_name_gen_audio = os.path.join(audio_result_dir,'speaker_{}_digit_{}_gender_orig_{}_sampled_{}.wav'.format(speaker_id_sample, digit_sample, gender_sample, gen_secret_sample))
save_sample(f_name_orig_audio, args.sampling_rate, original_audio_sample)
save_sample(f_name_gen_audio, args.sampling_rate, gen_audio_sample)
# Calculate accuracies
spec_digit_accuracy_F.append(100*spec_correct_digit_F / n_test)
spec_original_gender_accuracy_F.append(100*spec_correct_original_gender_F / n_test)
spec_digit_accuracy_G.append(100*spec_correct_digit_G / n_test)
spec_original_gender_accuracy_G.append(100*spec_correct_original_gender_G / n_test)
spec_sampled_gender_accuracy_G.append(100*spec_correct_sampled_gender_G / n_test)
audio_digit_accuracy_F.append(100*audio_correct_digit_F / n_test)
audio_original_gender_accuracy_F.append(100*audio_correct_original_gender_F / n_test)
audio_digit_accuracy_G.append(100*audio_correct_digit_G / n_test)
audio_original_gender_accuracy_G.append(100*audio_correct_original_gender_G / n_test)
audio_sampled_gender_accuracy_G.append(100*audio_correct_sampled_gender_G / n_test)
# Concatenate batch activations into single array
# acts_real_spec = np.concatenate(acts_real_spec, axis = 0)
# acts_fake_spec_F = np.concatenate(acts_fake_spec_F, axis = 0)
# acts_fake_spec_G = np.concatenate(acts_fake_spec_G, axis = 0)
acts_real_audio = np.concatenate(acts_real_audio, axis = 0)
acts_fake_audio_F = np.concatenate(acts_fake_audio_F, axis = 0)
acts_fake_audio_G = np.concatenate(acts_fake_audio_G, axis = 0)
acts_inverted_audio = np.concatenate(acts_inverted_audio, axis = 0)
# Calculate FID scores
# fid_spec_tmp_F = compute_frechet_inception_distance(acts_real_spec, acts_fake_spec_F)
# fid_spec_tmp_G = compute_frechet_inception_distance(acts_real_spec, acts_fake_spec_G)
#fid_audio_tmp_F = compute_frechet_inception_distance(acts_real_audio, acts_fake_audio_F)
#fid_audio_tmp_G = compute_frechet_inception_distance(acts_real_audio, acts_fake_audio_G)
#if i == 0:
# fid_inverted_audio_tmp = compute_frechet_inception_distance(acts_real_audio, acts_inverted_audio)
# fid_spec_F.append(fid_spec_tmp_F)
#fid_audio_F.append(fid_audio_tmp_F)
# fid_spec_G.append(fid_spec_tmp_G)
#fid_audio_G.append(fid_audio_tmp_G)
#fid_inverted_audio.append(fid_inverted_audio_tmp)
print("Computed accuracies and FID for run {}.".format(i))
# Create data frames with the accuracies
spec_digit_acc_F_df = pd.DataFrame(spec_digit_accuracy_F)
spec_orig_gender_acc_F_df = pd.DataFrame(spec_original_gender_accuracy_F)
spec_digit_acc_G_df = pd.DataFrame(spec_digit_accuracy_G)
spec_orig_gender_acc_G_df = pd.DataFrame(spec_original_gender_accuracy_G)
spec_sampled_gender_acc_G_df = pd.DataFrame(spec_sampled_gender_accuracy_G)
audio_digit_acc_F_df = pd.DataFrame(audio_digit_accuracy_F)
audio_orig_gender_acc_F_df = | pd.DataFrame(audio_original_gender_accuracy_F) | pandas.DataFrame |
import pandas as pd
import numpy as np
import re
from unidecode import unidecode
# Map district in Kraków to integers.
# For details see:
# https://en.wikipedia.org/wiki/Districts_of_Krak%C3%B3w
districts = {'stare miasto': 1,
'grzegórzki': 2,
'prądnik czerwony': 3,
'prądnik biały': 4,
'krowodrza': 5,
'bronowice': 6,
'zwierzyniec': 7,
'dębniki': 8,
'łagiewniki': 9,
'borek fałęcki': 9,
'swoszowice': 10,
'podgórze duchackie': 11,
'bieżanów': 12,
'prokocim': 12,
'podgórze': 13,
'czyżyny': 14,
'mistrzejowice': 15,
'bieńczyce': 16,
'wzgórza krzesławickie': 17,
'nowa huta': 18}
# Remove polish characters from key names
for key in list(districts.keys()):
districts[unidecode(key)] = districts.pop(key)
# Translate data from polish to english.
translation = {'Cena': 'Price',
'Lokalizacja': 'Location',
'Data dodania': 'Date',
'Na sprzedaż przez': 'Seller',
'Rodzaj nieruchomości': 'Property',
'Liczba pokoi': 'Rooms',
'Liczba łazienek': 'Bathrooms',
'Wielkość (m2)': 'Area',
'Parking': 'Parking',
'Tytuł': 'Title',
'Opis': 'Description',
'Link': 'Link'}
def remove_polish_characters(x):
"""
Remove Polish characters.
Examples
--------
>>> remove_polish_characters('ąćęłńóśźż')
'acelnoszz'
"""
if pd.isnull(x):
return x
else:
x = unidecode(x)
return x
def parse_price(x):
"""
Convert string with price to a integer value.
Parameters
----------
x : str
Row from price column.
Returns
-------
int :
Price of the property.
Example
-------
>>> parse_price('349\xa0000 zł')
349000
>>> parse_price('349 000 zł')
349000
>>> parse_price('349\xa0000')
349000
>>> parse_price('349000')
349000
>>> parse_price(349000)
349000
>>> parse_price(349000.1235)
349000
>>> parse_price(np.nan)
nan
>>> parse_price('Proszę o kontakt')
nan
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.replace('\xa0', '')
x = x.replace('zł', '')
x = x.replace(' ', '')
x = x.strip()
try:
x = int(x)
except ValueError:
x = np.nan
return x
elif isinstance(x, int):
return x
elif isinstance(x, float):
x = int(x)
return x
else:
return np.nan
def extract_currency(x):
"""
Extract currency from price column.
Examples
--------
>>> extract_currency('123000zł')
'pln'
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'zł' in x or 'zł' in x or 'pln' in x:
return 'pln'
else:
return np.nan
else:
return np.nan
def parse_bathrooms(x):
"""
Extract first digit from string
describing the number of bathrooms.
Parameters
----------
x : str
String describing the number of bathrooms.
Returns
-------
int :
The number of bathrooms or nan.
Examples
--------
>>> parse_bathrooms('1 łazienka')
1
>>> parse_bathrooms('2 łazienki')
2
>>> parse_bathrooms('4')
4
>>> parse_bathrooms(3)
3
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = [s for s in x if s.isdigit()]
if x:
return int(x[0])
else:
return np.nan
elif isinstance(x, int):
return x
elif isinstance(x, float):
return int(x)
else:
return np.nan
def parse_rooms(x):
"""
Extract first digit in string
describing the number of bathrooms.
Parameters
----------
x : str
Row of rooms column.
Returns
-------
int
The number of rooms in the property.
Examples
--------
>>> parse_rooms('2 pokoje')
2
>>> parse_rooms('5')
5
>>> parse_rooms('3')
3
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
# Check for special
# cases first
x = x.lower()
if 'kawalerka' in x:
return 1
elif 'garsoniera' in x:
return 1
else:
# If not special case extract
# first digit in string.
x = [s for s in x if s.isdigit()]
if x:
return int(x[0])
else:
return np.nan
elif isinstance(x, float):
return int(x)
elif isinstance(x, int):
return x
else:
return np.nan
def extract_city(x):
"""
Extract city from location column.
Parameters
----------
x : str
Row of location column.
Returns
-------
str :
Kraków if the property is
located in Kraków else nan.
Examples
--------
>>> extract_city('Piotra Stachiewicza, Kraków-Krowodrza, Kraków')
'kraków'
>>> extract_city('os. Na Stoku, Kraków-Nowa Huta, Kraków')
'kraków'
>>> extract_city('Modlniczka, Wielka Wieś, krakowski')
nan
>>> extract_city('random string')
nan
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.split(',')
x = [s.strip().lower() for s in x]
if 'kraków' in x or 'krakow' in x or 'cracow' in x:
return 'kraków'
else:
return np.nan
else:
return np.nan
def extract_district(x):
"""
Extract district from location column.
Parameters
----------
x : str
Row from location column.
Returns
-------
str :
The district where the property is located.
Examples
--------
>>> extract_district('Piotra Stachiewicza, Kraków-Krowodrza, Kraków')
'krowodrza'
>>> extract_district('os. Na Stoku, Kraków-Nowa Huta, Kraków')
'nowa huta'
>>> extract_district('Modlniczka, Wielka Wieś, krakowski')
nan
>>> extract_city('random string')
nan
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
x = x.replace('kraków', '')
x = x.replace(',', ' ')
x = x.replace('-', ' ')
x = x.replace('.', ' ')
x = x.split(' ')
x = [s.replace(' ', '') for s in x if s != '']
x = ' '.join(x)
if x == '':
return np.nan
else:
for key in districts:
if key in x:
return key
return np.nan
def parse_seller(x):
"""
Translate seller column to english.
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'agencja' in x:
return 'realtor'
elif 'właściciel' in x:
return 'owner'
else:
return np.nan
else:
return np.nan
def parse_property(x):
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'dom' in x:
return 'house'
elif 'mieszkanie' in x:
return 'flat'
else:
return np.nan
else:
return np.nan
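# Examples (added comment): parse_property('Dom wolnostojący') -> 'house';
# parse_property('Mieszkanie 3-pokojowe') -> 'flat'; parse_property(42) -> nan.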
def parse_parking(x):
"""
Translate parking column.
Examples
--------
>>> parse_parking('garaż')
'garage'
>>> parse_parking('Ulica')
'street'
>>> parse_parking('Brak')
'no parking'
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'garaż' in x:
return 'garage'
elif 'kryty' in x:
return 'covered'
elif 'ulica' in x:
return 'street'
elif 'brak' in x:
return 'no parking'
else:
return np.nan
else:
return np.nan
def extract_garden(x):
"""
Check if property has garden.
Examples
--------
>>> extract_garden('piękny ogrod')
True
>>> extract_garden('piękny dom')
False
>>> extract_garden('1223')
False
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'ogrod' in x:
return True
else:
return False
else:
return False
def extract_balcony(x):
"""
Check if property has balcony.
Examples
--------
>>> extract_balcony('Mieszkanie z pięknym balkonem')
True
>>> extract_balcony('Brzydkie mieszkanie')
False
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'balkon' in x:
return True
else:
return False
else:
return False
def extract_terrace(x):
"""
Check if property has terrace.
Examples
--------
>>> extract_terrace('Mieszkanie z tarasem')
True
>>> extract_terrace('Brzydkie mieszkanie')
False
>>> extract_terrace('125')
False
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'taras' in x:
return True
else:
return False
else:
return False
def extract_basement(x):
"""
Check if property has a basement.
Examples
--------
>>> extract_basement('Mieszkanie z przynależną piwnica')
True
>>> extract_basement('Pierwsze pietro')
False
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'piwnica' in x:
return True
else:
return False
else:
return False
def extract_new(x):
"""
Check if property is new.
Examples
--------
>>> extract_new('Nowe mieszkanie')
True
>>> extract_new('Stare mieszkanie')
False
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'nowe' in x or 'nowa' in x:
return True
else:
return False
else:
return False
def extract_block(x):
"""
Check if property is in an block.
Examples
--------
>>> extract_block('Piękne mieszkanie w bloku xxx')
True
>>> extract_block(123)
False
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'bloku' in x:
return True
else:
return False
else:
return False
def extract_town_house(x):
"""
Check if property is in a town house.
Examples
--------
>>> extract_town_house('Małe mieszkanie w kamienicy')
True
>>> extract_town_house('Duże mieszkanie w bloku')
False
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'kamienica' in x or 'kamienicy' in x:
return True
else:
return False
else:
return False
def extract_apartment(x):
"""
Check if property is an apartment.
Examples
--------
>>> extract_apartment('Apartament na sprzedaż')
True
>>> extract_apartment('Kawalerka na sprzedaż')
False
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'apartament' in x or 'apartamencie' in x:
return True
else:
return False
else:
return False
def extract_studio(x):
"""
Check if the property is a studio flat.
Examples
--------
>>> extract_studio('Kawalerka na sprzedaż')
True
>>> extract_studio('Apartament na sprzedaż')
False
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'kawalerka' in x or 'kawaler' in x:
return True
else:
return False
else:
return False
def extract_bus_stops(x):
"""
Check if the property has bus stops nearby.
Examples
--------
>>> extract_bus_stops('Blisko przystanki komunikacji miejskiej')
True
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.lower()
if 'miejskiej' in x:
return True
else:
return False
else:
return False
def parse_title(x):
"""
Remove non letters from title.
Examples
--------
>>> parse_title('Piękne mieszkanie !!!')
'piękne mieszkanie'
"""
if | pd.isnull(x) | pandas.isnull |
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.asobject)
assert not idx.asobject.equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
class TestDateTimeIndexToJulianDate(object):
def test_1700(self):
r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5,
2345901.5])
r2 = date_range(start=Timestamp('1710-10-01'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5,
2451605.5])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
r1 = Float64Index(
[2451601.5, 2451601.5416666666666666, 2451601.5833333333333333,
2451601.625, 2451601.6666666666666666])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='H').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
r1 = Float64Index(
[2451601.5, 2451601.5006944444444444, 2451601.5013888888888888,
2451601.5020833333333333, 2451601.5027777777777777])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='T').to_julian_date()
assert isinstance(r2, Float64Index)
| tm.assert_index_equal(r1, r2) | pandas.util.testing.assert_index_equal |
from unittest import TestCase
from os import path as os_path
import shutil
from pandas import DatetimeIndex, DataFrame, Series, Timedelta, infer_freq, \
Timestamp
import numpy as np
from plotly.graph_objects import Figure
from matplotlib.axes import Axes
from timeatlas import TimeSeries, Metadata
from timeatlas.config.constants import *
class TestTimeSeries(TestCase):
def setUp(self) -> None:
# Create a time indexed series
index = DatetimeIndex(['2019-01-01', '2019-01-02',
'2019-01-03', '2019-01-04'])
self.my_data = Series([0.4, 1.0, 0.7, 0.6], index=index).to_frame()
# Create metadata
my_unit = {
"name": "power",
"symbol": "W",
"data_type": "float"
}
my_coordinates = {
"lat": 46.796611,
"lon": 7.147563
}
my_dict = {
"unit": my_unit,
"coordinates": my_coordinates
}
self.my_metadata = Metadata(my_dict)
# self.my_time_series = TimeSeries(self.my_series, self.my_metadata)
self.my_time_series = TimeSeries(self.my_data)
# Define a target directory
self.target_dir = "data/test-export"
def test__init__is_instance(self):
my_time_series = TimeSeries()
self.assertIsInstance(my_time_series, TimeSeries,
"The TimeSeries hasn't the right type")
def test__init__has_right_types(self):
# Add some data
index = DatetimeIndex(['2019-01-01', '2019-01-02',
'2019-01-03', '2019-01-04'])
my_series = DataFrame([0.4, 1.0, 0.7, 0.6], index=index)
my_metadata = Metadata()
my_ts = TimeSeries(my_series, my_metadata)
# Check types
self.assertIsInstance(my_ts._data, DataFrame,
"The TimeSeries series is not a Pandas DataFrame")
self.assertIsInstance(my_ts.metadata, Metadata,
"The TimeSeries Metadata hasn't got the right type")
def test__init__contains_metadata(self):
# Add some data
index = DatetimeIndex(['2019-01-01', '2019-01-02',
'2019-01-03', '2019-01-04'])
my_series = DataFrame([0.4, 1.0, 0.7, 0.6], index=index)
my_metadata = Metadata()
my_ts = TimeSeries(my_series, my_metadata)
# Check types
self.assertNotEqual(my_ts.metadata, None,
"The TimeSeries Metadata is probably None")
def test__init__has_values_as_column_name(self):
index = DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04'])
my_series = Series([0.4, 1.0, 0.7, 0.6], index=index)
ts = TimeSeries(my_series)
self.assertTrue(COMPONENT_VALUES in ts._data.columns)
def test__init__wrong_index_type(self):
values = Series([0.4, 1.0, 0.7, 0.6])
with self.assertRaises(AssertionError):
TimeSeries(values)
def test__init__with_Series_input(self):
index = DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04'])
my_series = Series([0.4, 1.0, 0.7, 0.6], index=index)
ts = TimeSeries(my_series)
self.assertTrue(COMPONENT_VALUES in ts._data.columns)
self.assertIsInstance(ts, TimeSeries)
def test__init__with_DataFrame_input_single_column(self):
index = DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04'])
my_series = Series([0.4, 1.0, 0.7, 0.6], index=index)
df = DataFrame(data=my_series)
ts = TimeSeries(df)
self.assertTrue(COMPONENT_VALUES in ts._data.columns)
self.assertIsInstance(ts, TimeSeries)
def test__init__with_DataFrame_input_many_columns__without_values(self):
index = DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04'])
my_series = Series([0.4, 1.0, 0.7, 0.6], index=index)
df = DataFrame({"one": my_series, "two": my_series})
with self.assertRaises(AssertionError):
ts = TimeSeries(df)
def test__init__with_DataFrame_input_many_columns__with_values(self):
index = DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04'])
my_series = Series([0.4, 1.0, 0.7, 0.6], index=index)
df = DataFrame({COMPONENT_VALUES: my_series, "two": my_series})
ts = TimeSeries(df)
self.assertIsInstance(ts, TimeSeries)
def test__init__freq_is_infered(self):
index = DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04'])
my_series = Series([0.4, 1.0, 0.7, 0.6], index=index)
ts = TimeSeries(my_series)
self.assertEqual( | infer_freq(index) | pandas.infer_freq |
import os
import pytest
import pandas as pd
import numpy as np
from collections import OrderedDict
from ..catalog_matching import (crossmatch,
select_min_dist,
post_k2_clean,
find_campaigns,
match_k2_epic_campaign,
extract_extensions,
assign_PMem_mean,
merge_k2_info_and_protocol,
crossmatch_multiple_catalogs,
pick_members_and_produce_k2_search_input,
)
ra = 'RAJ2000'
dec = 'DEJ2000'
def test_pick_members_and_produce_k2_search_input():
#---------------------------------------------------------
# Produce fake data
cross = pd.DataFrame(dict(zip(["RAJ2000_1", "DEJ2000_1",
"somecolumn", "PMem_1",
"PMem_2", "PMem_3"],
[[20, 20, 20],
[20, 20, 20],
["rolf", "hagen", "busch"],
[.1, .8, .9],
[.1, .8, .9],
[.9, np.nan, .9]],
)))
sname, name = "test", "Test"
coords = "1"
series = cross.loc[2,:] #this row should be preserved in result
outfile = ('catalog_matching/matched_catalogs/'
'membership_matches/radec/{}_radec.csv'
.format(sname))
#---------------------------------------------------------
# Call function
res = pick_members_and_produce_k2_search_input(cross, sname,
name, coords=coords)
df = pd.read_csv(outfile, header=None)
#---------------------------------------------------------
# Check if the RA/Dec file is correct:
assert df.loc[0,0] == 20
assert df.loc[0,1] == 20
assert df.shape[0] == 1
assert df.shape[1] == 2
# Remove output file
os.remove(outfile)
# Check if the DataFrame was processed correctly
assert res.shape[0] == 1 # only one member is left
assert (res.loc[2,series.index] == series).all() # input columns are preserved
def test_crossmatch_multiple_catalogs():
#-----------------------------------------------------------
# Create a fake data set
diff = 1.49/3600 # 1.5 arcsec distance
c1 = pd.DataFrame(dict(zip(["RAJ2000_1","DEJ2000_1","PMem_1"],
[[21,20,19],[10,10,10],[.9,.8,.7]])))
c2 = pd.DataFrame(dict(zip(["RAJ2000_2","DEJ2000_2","PMem_2","binary_2"],
[[21,20+diff,19+3*diff],[10,10,10],
[.75,.85,.3],[.1,.02,.11]])))
c3 = pd.DataFrame(dict(zip(["RAJ2000_3","DEJ2000_3","PMem_3"],
[[np.nan,20-diff,19],[10,10,10],[.9,.9,.9]])))
d = {"1":c1, "2":c2, "3":c3}
renamed_catalogs = OrderedDict(sorted(d.items(), key=lambda t: t[0])) # order the dicts, not necessary for performance but helpful for testing
name = "Test"
sname = "test"
#-----------------------------------------------------------
# Call the function
res = crossmatch_multiple_catalogs(renamed_catalogs, name, sname,
arcsec=3., plot=True, union=True,
bijective=True)
#-----------------------------------------------------------
# Do some checks
# Check that the table size is correct
assert res.shape[0] == 5
assert res.shape[1] == 16
# Check that relevant columns are created with the right names/values
assert "DEJ2000_1_2_3" in res.columns.tolist()
assert set(c1.columns.values).issubset(set(res.columns.values))
assert set(c2.columns.values).issubset(set(res.columns.values))
assert set(c3.columns.values).issubset(set(res.columns.values))
# Check that the distance calculation was done correctly
assert res.loc[1, "dist_1_2_3"] == pytest.approx(2.235, rel=.1)
assert res.loc[2, "dist_1_2_3"] == 0.
# Check that NaNs stay NaNs:
assert np.isnan(res.loc[4, "RAJ2000_3"])
# Check individual values and NaNs:
assert res.loc[2, "RAJ2000_1_2_3"] == 19.
assert (res.DEJ2000_1_2_3.values == 10.).all()
assert res.dist_1_2.dropna().shape[0] == 2
assert res.dist_1_2_3.dropna().shape[0] == 2
def test_merge_k2_info_and_protocol():
# Fake data
folder = "catalog_matching/tests/exfiles/"
sname = "tiny"
df = | pd.read_csv('catalog_matching/tests/exfiles/select_min_dist_union_k2.csv') | pandas.read_csv |
import numpy as np
import pytest
from pandas._libs import join as _join
from pandas import Categorical, DataFrame, Index, merge
import pandas._testing as tm
class TestIndexer:
@pytest.mark.parametrize(
"dtype", ["int32", "int64", "float32", "float64", "object"]
)
def test_outer_join_indexer(self, dtype):
indexer = _join.outer_join_indexer
left = np.arange(3, dtype=dtype)
right = np.arange(2, 5, dtype=dtype)
empty = np.array([], dtype=dtype)
result, lindexer, rindexer = indexer(left, right)
assert isinstance(result, np.ndarray)
assert isinstance(lindexer, np.ndarray)
assert isinstance(rindexer, np.ndarray)
tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
exp = np.array([0, 1, 2, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(empty, right)
tm.assert_numpy_array_equal(result, right)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(left, empty)
tm.assert_numpy_array_equal(result, left)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, -1], dtype=np.int64)
| tm.assert_numpy_array_equal(rindexer, exp) | pandas._testing.assert_numpy_array_equal |
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from src.utils.io import load, save
def drop_not_relevant(features, targets, days):
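# Drop the first `days` rows from both features and targets.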
features = features.iloc[days:]
targets = targets.iloc[days:]
return features, targets
def impute(features):
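# Replace missing values with the column mean; wrapping the result back
# into a DataFrame keeps the original column names.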
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
features_imputed = pd.DataFrame(imp_mean.fit_transform(features), columns=features.columns)
return features_imputed
def build_timeseries_windows(dataset, target, start_index, end_index, lookback,
target_size, step, single_step=True):
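# Slide a window of length `lookback` (sampled every `step` rows) over the
# data; the label is the target just after the window (single_step) or the
# next `target_size` target values.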
data = []
labels = []
start_index = start_index + lookback
if end_index is None:
end_index = min(len(dataset), len(target)) - target_size
for i in range(start_index, end_index):
indices = range(i-lookback, i, step)
data.append(dataset[indices])
if single_step:
labels.append(target[i])
else:
labels.append(target[i:i+target_size])
return np.array(data), np.array(labels)
def scale(data, train_size):
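# Standardise using the mean/std computed on data[:-train_size],
# i.e. all rows except the last `train_size`.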
data_mean = data[:-train_size].mean(axis=0)
data_std = data[:-train_size].std(axis=0)
data = (data-data_mean)/data_std
return data
def train_val_test_split(x, y, test_size, val_size):
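# Chronological split: last `test_size` rows -> test, the `val_size` rows
# before them -> validation, everything earlier -> train.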
test_x = x[-test_size:]
test_y = y[-test_size:]
val_x = x[-(test_size+val_size):-test_size]
val_y = y[-(test_size+val_size):-test_size]
train_x = x[:-(test_size+val_size)]
train_y = y[:-(test_size+val_size)]
print(train_x.shape)
print(val_x.shape)
print(test_x.shape)
return train_x, train_y, val_x, val_y, test_x, test_y
def check_if_data_valid(dataset, lookback):
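# Sanity check that features and labels stay aligned: the binarised (>= 0)
# first feature of window i+lookback must equal the label of window i.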
train_x, train_y, val_x, val_y, test_x, test_y = dataset
for data_x, data_y in zip([train_x, val_x, test_x], [train_y, val_y, test_y]):
data_x_bin = (data_x >= 0).astype(np.int32)
for i in range(0, data_y.shape[0]-lookback):
assert data_x_bin[i+lookback, 0, 0] == data_y[i]
def build_data(features, targets, lookback=1, test_size=250, val_size=250, encode_binary=True,
scaled=True, check_data=True, pct_change=True):
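# Assemble the model-ready dataset; with pct_change=True the raw Close
# prices are first converted to percentage changes.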
name = 'data_lookback_' + str(lookback)
future_target = 1
step = 1
if pct_change:
targets = pd.DataFrame(targets['Close'].pct_change(), columns=['Close'])
else:
targets = | pd.DataFrame(targets['Close'], columns=['Close']) | pandas.DataFrame |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
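# Summary of the ``parse_dates`` forms exercised above: a list of column
# names, a dict mapping column -> strftime format string, and a dict mapping
# column -> unit (e.g. "s") that is forwarded to ``to_datetime``.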
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that runs the tests against an explicit Connection (opened from the
engine and wrapped in a transaction) rather than an Engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
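# TestSQLApiConn below re-runs the public-API tests against a Connection
# object instead of an Engine; the surrounding transaction is rolled back on
# teardown so each test's writes are discarded.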
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# the result index will gain a name from the set_index operation; the
# expected index has no name, so skip name checking
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so it shouldn't be parsed
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
Test the sqlalchemy backend against a MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
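# CLIENT.MULTI_STATEMENTS is set so that the multi-statement
# "DROP PROCEDURE ...; CREATE PROCEDURE ..." block in test_read_procedure
# can be sent as a single string.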
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more of an API test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
# only test this for postgresql (schemas are not supported in
# mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schema's
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 = sql.read_sql_table("test_schema_public", self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table(
"test_schema_public_explicit", self.conn, schema="public"
)
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(df, res4)
msg = "Table test_schema_other not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("test_schema_other", self.conn, schema="public")
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql("test_schema_other", self.conn, schema="other", index=False)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="replace",
)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="append",
)
res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema="other")
pdsql = sql.SQLDatabase(engine2, meta=meta)
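# With a MetaData constructed with schema="other", SQLDatabase targets that
# schema by default, so the to_sql calls below land in "other" without an
# explicit schema= argument.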
pdsql.to_sql(df, "test_schema_other2", index=False)
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace")
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append")
res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other")
res2 = pdsql.read_table("test_schema_other2")
tm.assert_frame_equal(res1, res2)
def test_copy_from_callable_insertion_method(self):
# GH 8953
# Example in io.rst found under _io.sql.method
# not available in sqlite, mysql
def psql_insert_copy(table, conn, keys, data_iter):
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ", ".join(f'"{k}"' for k in keys)
if table.schema:
table_name = f"{table.schema}.{table.name}"
else:
table_name = table.name
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)
expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
expected.to_sql(
"test_copy_insert", self.conn, index=False, method=psql_insert_copy
)
result = sql.read_sql_table("test_copy_insert", self.conn)
tm.assert_frame_equal(result, expected)
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlite3.connect(":memory:")
def setup_connect(self):
self.conn = self.connect()
def load_test_data_and_sql(self):
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
# GH 29921
self._to_sql(method="multi")
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
self.pandasSQL.to_sql(temp_frame, "drop_test_frame")
assert self.pandasSQL.has_table("drop_test_frame")
self.pandasSQL.drop_table("drop_test_frame")
assert not self.pandasSQL.has_table("drop_test_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_date", self.conn)
if self.flavor == "sqlite":
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == "mysql":
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_time", self.conn)
if self.flavor == "sqlite":
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' "
+ f"AND tbl_name = '{tbl_name}'",
self.conn,
)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
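# PRAGMA table_info returns one row per column:
# (cid, name, type, notnull, dflt_value, pk)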
recs = self.conn.execute(f"PRAGMA table_info({table})")
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError(f"Table {table}, column {column} not found")
def test_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": "STRING"})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER"
assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING"
msg = r"B \(<class 'bool'>\) not a string"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": bool})
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype="STRING")
assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING"
assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING"
def test_notna_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
assert self._get_sqlite_column_type(tbl, "Bool") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Date") == "TIMESTAMP"
assert self._get_sqlite_column_type(tbl, "Int") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Float") == "REAL"
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
msg = "Empty table or column name specified"
with pytest.raises(ValueError, match=msg):
df.to_sql("", self.conn)
for ndx, weird_name in enumerate(
[
"test_weird_name]",
"test_weird_name[",
"test_weird_name`",
'test_weird_name"',
"test_weird_name'",
"_b.test_weird_name_01-30",
'"_b.test_weird_name_01-30"',
"99beginswithnumber",
"12345",
"\xe9",
]
):
df.to_sql(weird_name, self.conn)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name])
c_tbl = f"test_weird_col_name{ndx:d}"
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
def date_format(dt):
"""Returns date in YYYYMMDD format."""
return dt.strftime("%Y%m%d")
_formatters = {
datetime: "'{}'".format,
str: "'{}'".format,
np.str_: "'{}'".format,
bytes: "'{}'".format,
float: "{:.8f}".format,
int: "{:d}".format,
type(None): lambda x: "NULL",
np.float64: "{:.10f}".format,
bool: "'{!s}'".format,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isna(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
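# Illustration (a hypothetical call, based on the formatters above):
#   format_query("INSERT INTO test VALUES (%s, %s)", 1.5, None)
#   -> 'INSERT INTO test VALUES (1.50000000, NULL)'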
def tquery(query, con=None, cur=None):
"""Replace removed sql.tquery function"""
res = sql.execute(query, con=con, cur=cur).fetchall()
if res is None:
return None
else:
return list(res)
@pytest.mark.single
class TestXSQLite(SQLiteMixIn):
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
self.method = request.function
self.conn = sqlite3.connect(":memory:")
# In some test cases we may close db connection
# Re-open conn here so we can perform cleanup in teardown
yield
self.method = request.function
self.conn = sqlite3.connect(":memory:")
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, check_less_precise=True)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.iloc[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(" ")
if len(tokens) == 2 and tokens[0] == "A":
assert tokens[1] == "DATETIME"
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test", keys=["A", "B"])
lines = create_sql.splitlines()
assert 'PRIMARY KEY ("A", "B")' in create_sql
cur = self.conn.cursor()
cur.execute(create_sql)
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.to_sql(frame, name="test_table", con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame["txt"] = ["a"] * len(frame)
frame2 = frame.copy()
new_idx = Index(np.arange(len(frame2))) + 10
frame2["Idx"] = new_idx.copy()
sql.to_sql(frame2, name="test_table2", con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn, index_col="Idx")
expected = frame.copy()
expected.index = new_idx
expected.index.name = "Idx"
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({"From": np.ones(5)})
sql.to_sql(df, con=self.conn, name="testkeywords", index=False)
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df = DataFrame([1, 2], columns=["c0"])
sql.to_sql(mono_df, con=self.conn, name="mono_df", index=False)
# computing the sum via sql
con_x = self.conn
the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df"))
# it should not fail, and gives 3 ( Issue #3628 )
assert the_sum == 3
result = sql.read_sql("select * from mono_df", con_x)
tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
table_name = "table_if_exists"
sql_select = f"SELECT * FROM {table_name}"
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
msg = "'notvalidvalue' is not valid for if_exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="notvalidvalue",
)
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(
frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
)
msg = "Table 'table_if_exists' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
)
# test if_exists='replace'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(3, "C"), (4, "D"), (5, "E")]
clean_up(table_name)
# test if_exists='append'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="fail",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="append",
index=False,
)
assert tquery(sql_select, con=self.conn) == [
(1, "A"),
(2, "B"),
(3, "C"),
(4, "D"),
(5, "E"),
]
clean_up(table_name)
@pytest.mark.single
@pytest.mark.db
@pytest.mark.skip(
reason="gh-13611: there is no support for MySQL if SQLAlchemy is not installed"
)
class TestXMySQL(MySQLMixIn):
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
pymysql = pytest.importorskip("pymysql")
pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest")
try:
pymysql.connect(read_default_group="pandas")
except pymysql.ProgrammingError:
raise RuntimeError(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
except pymysql.Error:
raise RuntimeError(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
pymysql = pytest.importorskip("pymysql")
pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest")
try:
pymysql.connect(read_default_group="pandas")
except pymysql.ProgrammingError:
raise RuntimeError(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
except pymysql.Error:
raise RuntimeError(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
self.method = request.function
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, check_less_precise=True)
def test_chunksize_read_type(self):
frame = tm.makeTimeDataFrame()
frame.index.name = "index"
drop_sql = "DROP TABLE IF EXISTS test"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.to_sql(frame, name="test", con=self.conn)
query = "select * from test"
chunksize = 5
chunk_gen = pd.read_sql_query(
sql=query, con=self.conn, chunksize=chunksize, index_col="index"
)
chunk_df = next(chunk_gen)
tm.assert_frame_equal(frame[:chunksize], chunk_df)
def test_execute(self):
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.iloc[0].values.tolist()
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(" ")
if len(tokens) == 2 and tokens[0] == "A":
assert tokens[1] == "DATETIME"
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test", keys=["A", "B"])
lines = create_sql.splitlines()
assert "PRIMARY KEY (`A`, `B`)" in create_sql
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
def test_execute_fail(self):
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self, request, datapath):
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
| sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn) | pandas.io.sql.execute |
#!/usr/bin/env python
'''
This script requires some additional dependencies, which are installed using:
pip install pandas utm colour
'''
import django
from geocamUtil.loader import LazyGetModelByName
django.setup()
from django.conf import settings
from xgds_braille_app.models import BandDepthDefinition, BandDepthGeoJSON, BandDepthTimeSeries
import pandas as pd
from colour import Color
from utm import from_latlon, to_latlon
from json import dumps
colors = list(Color("blue").range_to(Color("red"), 100))
def lat_lon_to_utm(row):
return from_latlon(row['latitude'], row['longitude'])
def clip_to_range(minimum, maximum, x):
return max(minimum, min(x, maximum))
def scale_between(minimum, maximum, x):
rng = maximum - minimum
return minimum + x * rng
def get_color(percentage):
clipped = clip_to_range(0, 0.5, percentage)
clipped *= 2 # it is now between 0 and 1
return colors[int(clipped * (len(colors) - 1))]
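# A small sanity sketch for the three helpers above (illustrative values only):
# out-of-range confidence / band-depth values are clipped before being mapped
# onto the `colors` gradient.
assert clip_to_range(0, 0.5, 0.8) == 0.5
assert scale_between(0.25, 0.5, 1.0) == 0.5
assert str(get_color(0.0)) == str(colors[0])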
def create_feature_collection(collection):
return {
"type": "FeatureCollection",
"features": collection,
}
def create_geojson(easting, northing, zone_number, zone_letter, band_depth, confidence, stddev):
clipped_confidence = clip_to_range(0, 150, confidence) / 150.0
radius = scale_between(0.25, 0.5, clipped_confidence)
lat_minus_radius, lng_minus_radius = to_latlon(easting - radius, northing - radius, zone_number, zone_letter)
lat_plus_radius, lng_plus_radius = to_latlon(easting + radius, northing + radius, zone_number, zone_letter)
return {
"type": "Feature",
"properties":
{
"stroke-width": 0,
"fill": str(get_color(band_depth)),
"fill-opacity": 1,
"band-depth": "%f +- %f" % (round(float(band_depth), 4), round(float(stddev), 4)),
"confidence": int(confidence),
},
"geometry":
{
"type": "Polygon",
"coordinates":
[[
[lng_plus_radius, lat_minus_radius],
[lng_minus_radius, lat_minus_radius],
[lng_minus_radius, lat_plus_radius],
[lng_plus_radius, lat_plus_radius],
[lng_plus_radius, lat_minus_radius],
]]
}
}
def create_geojson_for_flight(flight, band_depth_definition):
band_depth_time_series = BandDepthTimeSeries.objects.filter(
time_stamp__gte=flight.start_time,
time_stamp__lte=flight.end_time,
band_depth_definition=band_depth_definition,
flight=flight,
)
band_depth = []
for bdts in band_depth_time_series:
band_depth.append({
"timestamp": bdts.time_stamp,
"value": bdts.band_depth,
})
band_depth = | pd.DataFrame(data=band_depth) | pandas.DataFrame |
from flask import Flask, render_template,request,redirect,flash,url_for
from flask import *
from flaskext.mysql import MySQL
from flask import request
import pandas as pd
from sklearn.model_selection import train_test_split
import missingno as msno
import warnings
warnings.filterwarnings('ignore')
import pickle
mysql = MySQL()
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
app.secret_key = 'your_secret_key'
@app.route("/")
def home():
return render_template("index.html")
@app.route("/register")
def register():
#Loading the quiz data
#reading data
df0 = pd.read_csv('data/LWR1.csv')
df1 = pd.read_csv('data/MOR1.csv')
df2 = pd.read_csv('data/PCR1.csv')
df3 = pd.read_csv('data/PVR1.csv')
df4 = pd.read_csv('data/SRR1.csv')
#spelling data
df5 = pd.read_csv('data/LWS1.csv')
df6 = pd.read_csv('data/PVS1.csv')
df7 = pd.read_csv('data/SOSS1.csv')
#memory data
df8 = pd.read_csv('data/LWM1.csv')
df9 = | pd.read_csv('data/LSM1.csv') | pandas.read_csv |
import os
import pandas as pd
'''
Process Pipeline:
-Get the CSV
-Get the file name in a folder
--Return list
-Frame the list of file name in sequence
--Can be thought of as a dict-like container for Series objects
--pd.DataFrame({'Column': Value})
-Assign the framed list to image2 column
--Return new_train_label['image2']
-MAJOR PART:
-The image2 label is of the form "100_right_mir.jpeg" OR "100_right_90.jpeg"
--we have to make it '100_right.jpeg' i.e remove '_90.jpeg' OR '_mir.jpeg'
-We can split the string on '_'
--there are two '_' in "100_right_mir.jpeg", so we have to split at the
second '_'; therefore use [0:2]
---Try:
test = "100_right_mir.jpeg"
print(test.split('_')[0:2])
---Return:
['100', 'right']
-To combine the split string (['100', 'right']) use join
--join the two list with '_'
---Try:
test = "100_right_mir.jpeg"
print('_'.join(test.split('_')[0:2]))
---Return:
100_right
-Use the above concept to apply through all label and use
--"lambda argument: expression"
---EG: (Without Lambda)
def add(x, y):
return x + y
add(2, 3)
---EG: (With Lambda)
add = lambda x, y : x + y
print add(2, 3)
-Locate(.loc) the label 'image2' and apply the lambda function on it
-After the label is split and joined again, there are still original files in the
list which don't have the '_mir.jpeg' OR '_90.jpeg' suffix,
i.e. 100_right.jpeg (original file)
-So to make them look like the other files in the list, we have to remove the '.jpeg'
extension from the original file names and then add the '.jpeg'
extension back to all the files in the list.
---lambda x: '_'.join(x.split('_')).strip('.jpeg')
-This will remove the .jpeg extension from the original file
---lambda x: '_'.join(x.split('_')).strip('.jpeg') + '.jpeg')
-This will again add the '.jpeg' extension.
-TRY PRINTING THE LABELS BEFORE AND AFTER ADDING THE '.JPEG'
SO YOU WILL GET THE IDEA
TRY:-
'print(new_train_label['image2'])'
-Replace the 'image' column with the 'train_image_name'
--'new_train_label.columns = ['train_image_name', 'image']'
-Merge the dataFrame with original CSV.
--pd.merge(left, right, how='outer', on=None)
---left: A DataFrame object.
---right: Another DataFrame object.
---on: Column or index level names to join on. Must be found in
both the left and right DataFrame objects
---how: One of 'left', 'right', 'outer', 'inner'.
-outer:- Use union of keys from both frames
-Save .to_csv
'''
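# A minimal sketch of the normalization described above, using a hypothetical
# augmented file name (not taken from the real training set):
# "100_right_mir.jpeg" -> split/join -> "100_right" -> add the extension back.
example_name = "100_right_mir.jpeg"
normalized_name = '_'.join(example_name.split('_')[0:2]).strip('.jpeg') + '.jpeg'
assert normalized_name == '100_right.jpeg'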
def get_images_list(file_path):
# .DS_Store is for Mac Users
return [i for i in os.listdir(file_path) if i != '.DS_Store']
if __name__ == '__main__':
#read CSV (INCLUDE trainLabels_find_black_images.csv)
trainLabels = pd.read_csv('E:/Tirth/trainlabel_zip/trainLabels_csv/trainLabels_find_black_images.csv')
#get file name
list_images = get_images_list('E:/Tirth/train/crop_256/')
#framing the list_of_filename in sequence
new_trainLabels = pd.DataFrame({'image': list_images})
new_trainLabels['image2'] = new_trainLabels.image
#Remove the suffix from the image names.
new_trainLabels['image2'] = new_trainLabels.loc[:, 'image2'].apply(lambda x: '_'.join(x.split('_')[0:2]))
#Strip and add .jpeg back into file name
new_trainLabels['image2'] = new_trainLabels.loc[:, 'image2'].apply(
lambda x: '_'.join(x.split('_')[0:2]).strip('.jpeg') + '.jpeg')
new_trainLabels.columns = ['train_image_name', 'image']
trainLabels = | pd.merge(trainLabels, new_trainLabels, how='outer', on='image') | pandas.merge |
#!/usr/bin/env python3
# 10.05.21
# Assignment lab 07
# Master Class: Machine Learning (5MI2018)
# Faculty of Economic Science
# University of Neuchatel (Switzerland)
# Lab 7, see ML21_Exercise_7.pdf for more information
# https://github.com/RomainClaret/msc.ml.labs
# Authors:
# - <NAME> @RomainClaret
# - <NAME> @Nic0uds
# 2. Interpret the results for one year using a decision tree. (with the original fields)
# 3. Compare the results to the clusters from exercise 5.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import FunctionTransformer, Normalizer, StandardScaler
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from joblib import dump
# Import the data and filter the two datasets for the chosen two years.
df = pd.read_stata('pwt100.dta')
# 2. Select the input features that you deem useful for the purpose.
features = [
"hc",
"ctfp",
"cwtfp",
"delta",
"pl_con",
"pl_da",
"pl_gdpo",
"csh_g",
"pl_c",
"pl_i",
"pl_g",
#"pl_k",
]
features_logarithmic = [
#"rgdpe",
"pop",
"ccon",
#"rgdpna",
"rconna",
"xr",
]
# 1. Select the target variable rgdpna or rtfpna.
targeted_feature = [
"rgdpna", # log required
#"rtfpna", # log not required
]
# Apply the logarithmic function to badly proportioned features
log_transformer = FunctionTransformer(np.log1p)
df_log_features = df[features_logarithmic+targeted_feature]
df_log = log_transformer.transform(df_log_features)
# Concat logarithmic features with unlogarithmic features
df_concat = pd.concat([df[features], df_log], axis=1, join="inner")
# Drop rows with na values
df_cleaned = df_concat.dropna()
# normalization to merge logarithmic and non-logarithmic features
df_normalized = pd.DataFrame(Normalizer().fit_transform(df_cleaned[features+features_logarithmic+targeted_feature]))
# build train and target datasets
X = pd.DataFrame(df_normalized,columns=df_normalized.columns[:-1])
y = | pd.DataFrame(df_normalized,columns=[df_normalized.columns[-1]]) | pandas.DataFrame |
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib as mt
from gensim.models import word2vec
from sklearn.model_selection import train_test_split
import sys
# Merge the data columns
dealed_data = dealed_data['top10'] + [" "] + dealed_data['top9'] + [" "] + dealed_data['top8'] + [" "] + \
dealed_data['top7'] + [" "] + dealed_data['top6'] + [" "] + dealed_data['top5'] + [" "] + dealed_data[
'top4'] + [" "] + dealed_data['top3'] + [" "] + dealed_data['top2'] + [" "] + dealed_data['top1']
# Split the data into tokens
dealed_data = [s.encode('utf-8').split() for s in dealed_data]
# Split into train and test sets
train_data, test_data = train_test_split(dealed_data, test_size=0.3, random_state=42)
# Train word2vec on the raw data
# sg=1: skip-gram; sg=0: CBOW
# hs=1: hierarchical softmax (Huffman tree)
# negative=0: no negative sampling
model = word2vec.Word2Vec(train_data, sg=1, min_count=10, window=2, hs=1, negative=0)
# Top-3 most similar items to the last purchase
x = 1000
result = []
result = pd.DataFrame(result)
for i in range(x):
test_data_split = [s.encode('utf-8').split() for s in test_data[i]]
k = len(test_data_split)
last_one = test_data_split[k - 1]
last_one_recommended = model.most_similar(last_one, topn=3)
tmp = last_one_recommended[0] + last_one_recommended[1] + last_one_recommended[2]
last_one_recommended = pd.concat([pd.DataFrame(last_one), pd.DataFrame(np.array(tmp))], axis=0)
last_one_recommended = last_one_recommended.T
result = pd.concat([pd.DataFrame(last_one_recommended), result], axis=0)
# Top-3 most similar items to the second-to-last purchase
x = 1000
result1 = []
result1 = pd.DataFrame(result1)
for i in range(x):
test_data_split = [s.encode('utf-8').split() for s in test_data[i]]
k = len(test_data_split)
last_one = test_data_split[k - 2]
last_one_recommended = model.most_similar(last_one, topn=3)
tmp = last_one_recommended[0] + last_one_recommended[1] + last_one_recommended[2]
last_one_recommended = pd.concat([pd.DataFrame(last_one), pd.DataFrame(np.array(tmp))], axis=0)
last_one_recommended = last_one_recommended.T
result1 = pd.concat([ | pd.DataFrame(last_one_recommended) | pandas.DataFrame |
import numpy as np
import pandas as pd
from tqdm import tqdm
class DataTransformers:
@staticmethod
def simulate_admissions(prior, batch_size: int = None, n_iterations: int = 0):
prior_sample_dict = {}
if n_iterations == 0:
# sequential retriever
n_iterations = len(prior) // batch_size
retriver = lambda loc: np.split(prior, np.arange(batch_size, len(prior), batch_size))[loc]
else:
# bootstrap retriever
batch_size = len(prior) if batch_size is None else batch_size
retriver = lambda loc: np.random.choice(prior, size=batch_size, replace=True)
for iteration in tqdm(range(n_iterations), desc="admission batches"):
sample = retriver(iteration)
prior_sample_dict.update({
iteration: {
"readmission_mean": sample.mean(),
"readmission_sum": sample.sum(),
"patients_cnt": batch_size
}
})
prior_df = | pd.DataFrame(prior_sample_dict) | pandas.DataFrame |
"""
for f in sorted(glob('*.py')):
# print(f'nohup python -u {f} 0 > LOG/log_{f}.txt &')
print(f'python -u {f} > LOG/log_{f}.txt')
"""
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
from glob import glob
import os
from socket import gethostname
HOSTNAME = gethostname()
from tqdm import tqdm
#from itertools import combinations
from sklearn.model_selection import KFold
from time import time, sleep
from datetime import datetime
from multiprocessing import cpu_count, Pool
import gc
# =============================================================================
# global variables
# =============================================================================
COMPETITION_NAME = 'santander-customer-transaction-prediction'
IMP_FILE = 'LOG/xxx.csv'
IMP_FILE_BEST = 'LOG/xxx.csv'
SUB_BEST = '../output/0328-1.csv.gz'
# =============================================================================
# def
# =============================================================================
def start(fname):
global st_time
st_time = time()
print("""
#==============================================================================
# START!!! {} PID: {} time: {}
#==============================================================================
""".format( fname, os.getpid(), datetime.today() ))
send_line(f'{HOSTNAME} START {fname} time: {elapsed_minute():.2f}min')
return
def reset_time():
global st_time
st_time = time()
return
def end(fname):
print("""
#==============================================================================
# SUCCESS !!! {}
#==============================================================================
""".format(fname))
print('time: {:.2f}min'.format( elapsed_minute() ))
send_line(f'{HOSTNAME} FINISH {fname} time: {elapsed_minute():.2f}min')
return
def elapsed_minute():
return (time() - st_time)/60
def mkdir_p(path):
try:
os.stat(path)
except:
os.mkdir(path)
def to_feature(df, path):
if df.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { df.columns[df.columns.duplicated()] }')
df.reset_index(inplace=True, drop=True)
df.columns = [c.replace('/', '-').replace(' ', '-') for c in df.columns]
for c in df.columns:
df[[c]].to_feather(f'{path}_{c}.f')
return
def to_pickles(df, path, split_size=3, inplace=True):
"""
path = '../output/mydf'
write '../output/mydf/000.p'
'../output/mydf/001.p'
'../output/mydf/002.p'
"""
print(f'shape: {df.shape}')
if inplace==True:
df.reset_index(drop=True, inplace=True)
else:
df = df.reset_index(drop=True)
gc.collect()
mkdir_p(path)
kf = KFold(n_splits=split_size)
for i, (train_index, val_index) in enumerate(tqdm(kf.split(df))):
df.iloc[val_index].to_pickle(f'{path}/{i:03d}.p')
return
def read_pickles(path, col=None, use_tqdm=True):
if col is None:
if use_tqdm:
df = pd.concat([ pd.read_pickle(f) for f in tqdm(sorted(glob(path+'/*'))) ])
else:
print(f'reading {path}')
df = pd.concat([ pd.read_pickle(f) for f in sorted(glob(path+'/*')) ])
else:
df = pd.concat([ pd.read_pickle(f)[col] for f in tqdm(sorted(glob(path+'/*'))) ])
return df
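# Hedged round-trip sketch for the two helpers above (the path is a placeholder):
# to_pickles(df, '../output/mydf_example', split_size=3) writes 000.p ... 002.p,
# and read_pickles('../output/mydf_example') concatenates them back in order.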
def to_pkl_gzip(df, path):
df.to_pickle(path)
os.system('rm ' + path + '.gz')
os.system('gzip ' + path)
return
def save_test_features(df):
for c in df.columns:
df[[c]].to_pickle(f'../feature/test_{c}.pkl')
return
# =============================================================================
#
# =============================================================================
def get_dummies(df):
"""
binary (two-level) object columns are encoded with drop_first=True; other object columns get a full set of dummies
"""
col = df.select_dtypes('O').columns.tolist()
nunique = df[col].nunique()
col_binary = nunique[nunique==2].index.tolist()
[col.remove(c) for c in col_binary]
df = pd.get_dummies(df, columns=col)
df = pd.get_dummies(df, columns=col_binary, drop_first=True)
df.columns = [c.replace(' ', '-') for c in df.columns]
return df
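# Toy illustration of get_dummies (hypothetical frame, not project data): a
# two-level object column is encoded with drop_first, a three-level one keeps
# every dummy column.
demo_dummies = get_dummies(pd.DataFrame({'sex': ['m', 'f', 'm'], 'city': ['NY', 'LA', 'SF']}))
# -> columns: city_LA, city_NY, city_SF, sex_m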
def reduce_mem_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != 'object' and col_type != 'datetime64[ns]':
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float32) # feather-format cannot accept float16
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
def savefig_imp(imp, path, x='gain', y='feature', n=30, title='Importance'):
import matplotlib as mpl
mpl.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(11.7, 8.27)
sns.barplot(x=x, y=y, data=imp.head(n), label=x)
plt.subplots_adjust(left=.4, right=.9)
plt.title(title+' TOP{0}'.format(n), fontsize=20, alpha=0.8)
plt.savefig(path)
# =============================================================================
#
# =============================================================================
def load_train(col=None):
if col is None:
return pd.read_pickle('../data/train.pkl')
else:
return | pd.read_pickle('../data/train.pkl') | pandas.read_pickle |
import pandas as pd
#from pandas.io.parsers import read_csv
# import plotly.express as px
# import plotly.graph_objects as go
import matplotlib.pyplot as plt
import numpy as np
datapoints = 101
def dsoc_interpolation(soc_data, volt_data, soc_in):
for i in range(len(soc_data)-1):# soc_in.max()):
if soc_in == 0:
volt_out = volt_data[0]
break
if soc_in == 1:
volt_out = volt_data[len(soc_data)-1]
break
if soc_data[i+1] >= soc_in and soc_data[i] < soc_in: # Interpolation between the two nearest SoC points
volt_out = volt_data[i] + (volt_data[i+1] - volt_data[i]) * (soc_in - soc_data[i]) / (soc_data[i+1] - soc_data[i])
break
return volt_out
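# Quick check of dsoc_interpolation with made-up SoC/voltage points (ascending
# SoC assumed); the result is plain linear interpolation between neighbours.
soc_demo = [0.0, 0.5, 1.0]
volt_demo = [3.0, 3.6, 4.2]
assert abs(dsoc_interpolation(soc_demo, volt_demo, 0.75) - 3.9) < 1e-9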
#################################################################################
################################# FOR DISCHARGE #################################
#################################################################################
dsd = pd.read_csv('discharge_data09_10_2021_22_55.csv')#Upload csv
dsd.columns = ['time','seconds','voltage','current','capacity','temperature']
# Cleaning the data FOR DISCHARGE
dsd.capacity = dsd.capacity - dsd.capacity.min()
dsd.seconds = dsd.seconds - dsd.seconds.min()
socd = 1 - dsd.capacity/dsd.capacity.max()
dsd = dsd.assign(soc=socd.values)
dsd = dsd.sort_values(by=['seconds'], ascending=False)
dsd = dsd.reset_index(drop=True)
# print(dsd.head())
#c_eff = dsd.capacity.max() / dsd.capacity.max()
#dsd.capacity = c_eff*dsd.capacity
#soc = 1 - dsc.capacity/dsc.capacity.max()
#dsd = dsd.assign(soc=soc.values)
volt_datad = dsd.voltage.values
soc_datad = dsd.soc.values
new_socd = np.linspace(0,1,datapoints) #1000 divisions of the new x axis (SoC)
new_voltd = np.zeros(len(new_socd))
for i in range(len(new_socd)):
new_voltd[i] = dsoc_interpolation(soc_datad, volt_datad, new_socd[i])
newdsd = pd.DataFrame() #New empty DataFrame
newdsd = newdsd.assign(soc=new_socd)
newdsd = newdsd.assign(voltage=new_voltd)
##################################################################################
################################### FOR CHARGE ###################################
##################################################################################
dsc = pd.read_csv('charge_data11_10_2021_23_55.csv')#Upload csv
dsc.columns = ['time','seconds','voltage','current','capacity','temperature']
# Cleaning the data FOR CHARGE
dsc.capacity = dsc.capacity - dsc.capacity.min()
dsc.seconds = dsc.seconds - dsc.seconds.min()
# Coulomb efficiency
c_eff = dsd.capacity.max() / dsc.capacity.max()
print(c_eff)
dsc.capacity = c_eff*dsc.capacity
socc = dsc.capacity/dsc.capacity.max()
dsc = dsc.assign(soc=socc.values)
## Quick check
print("Charge: ", dsc.capacity.max())
print("Discharge: ", dsd.capacity.max())
volt_datac = dsc.voltage.values
soc_datac = dsc.soc.values
new_socc = np.linspace(0,1,datapoints) #1000 divisions of the new x axis (SoC)
new_voltc = np.zeros(len(new_socc))
for i in range(len(new_socc)):
new_voltc[i] = dsoc_interpolation(soc_datac, volt_datac, new_socc[i])
newdsc = pd.DataFrame() #New empty DataFrame
newdsc = newdsc.assign(soc=new_socc)
newdsc = newdsc.assign(voltage=new_voltc)
soc = np.linspace(0,1,datapoints)
ocv = (newdsc.voltage.values + newdsd.voltage.values)/2
sococv = | pd.DataFrame(data={"soc":soc,"ocv": ocv}) | pandas.DataFrame |
import datetime as dt
import time
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from apipe import (
DelayedParameter,
DelayedParameters,
cached,
clear_cache,
delayed_cached,
delayed_compute,
eager_cached,
)
CACHE_DIR = Path("cache/temp/")
EPS = 0.00001
def _count_cache_files() -> int:
return len([f for f in CACHE_DIR.iterdir() if f.is_file()])
@pytest.mark.parametrize(
"data, ftype",
[
((0, 1, 3, 5, -1), "pickle"),
((0, 1.0, 3232.22, 5.0, -1.0, None), "pickle"),
([0, 1, 3, 5, -1], "pickle"),
([0, 1.0, 3232.22, 5.0, -1.0, None], "pickle"),
(np.array([0, 1.0, 3232.22, 5.0, -1.0]), "pickle"),
(np.array([dt.datetime(2019, 1, 1)] * 3), "pickle"),
(pd.Series([0, 1, 3, 5, -1]), "pickle"),
(pd.Series([0, 1.0, 3232.22, 5.0, -1.0, np.nan]), "pickle"),
(pd.Series([1, 2, 3, 4], dtype="category"), "pickle"),
(pd.Series(pd.date_range("2018-01-01", periods=5)), "pickle"),
(
pd.DataFrame(
{
"a": [0, 1, 3, 5, -1],
"b": [2, 1, 0, 0, 14],
}
),
"pickle",
),
(
pd.DataFrame(
{
"a": [0, 1.0, 3232.22, -1.0, np.nan],
"b": ["a", "b", "c", "ee", "14"],
"c": [
dt.datetime(2018, 1, 1),
dt.datetime(2019, 1, 1),
dt.datetime(2020, 1, 1),
dt.datetime(2021, 1, 1),
dt.datetime(2022, 1, 1),
],
}
),
"pickle",
),
(
pd.DataFrame(
{
"a": [0, 1, 3, 5, -1],
"b": [2, 1, 0, 0, 14],
}
),
"parquet",
),
(
pd.DataFrame(
{
"a": [0, 1.0, 3232.22, -1.0, np.nan],
"b": ["a", "b", "c", "ee", "14"],
"c": [
dt.datetime(2018, 1, 1),
dt.datetime(2019, 1, 1),
dt.datetime(2020, 1, 1),
dt.datetime(2021, 1, 1),
dt.datetime(2022, 1, 1),
],
"d": [
pd.Timestamp("2018-01-01"),
pd.Timestamp("2018-01-01"),
pd.Timestamp("2018-01-01"),
pd.Timestamp("2018-01-01"),
pd.Timestamp("2018-01-01"),
],
}
),
"parquet",
),
(
pd.DataFrame(
{
"a": [0, 1.0, 3232.22, -1.0, np.nan],
"b": [
dt.timedelta(hours=1),
dt.timedelta(hours=1),
dt.timedelta(hours=1),
dt.timedelta(hours=1),
dt.timedelta(hours=1),
],
}
),
"pickle",
),
],
)
def test_cached_load_and_hash(data, ftype):
clear_cache(CACHE_DIR)
@cached(folder=CACHE_DIR, ftype=ftype, override=False)
def load_data():
return data
@cached(folder=CACHE_DIR, ftype="pickle", override=False)
def compute_data(data):
return 0
loaded = load_data().load()
_ = compute_data(loaded).load()
assert _count_cache_files() == 4
loaded = load_data().load()
_ = compute_data(loaded).load()
assert _count_cache_files() == 4
if isinstance(data, pd.Series):
pd.testing.assert_series_equal(loaded, data)
elif isinstance(data, pd.DataFrame):
pd.testing.assert_frame_equal(loaded, data)
elif isinstance(data, np.ndarray):
np.testing.assert_equal(loaded, data)
else:
assert loaded == data
@pytest.mark.parametrize(
"data, ftype, eps, ts",
[
(
pd.DataFrame(
{
"a": [0, 1, 3, 5, -1],
"b": [2, 1, 0, 0, 14],
}
),
"parquet",
0.1,
pd.Timestamp("2018-01-01"),
),
(
pd.DataFrame(
{
"a": [0, 1.0, 3232.22, -1.0, np.nan],
"b": ["a", "b", "c", "ee", "14"],
"c": [
dt.datetime(2018, 1, 1),
dt.datetime(2019, 1, 1),
dt.datetime(2020, 1, 1),
dt.datetime(2021, 1, 1),
dt.datetime(2022, 1, 1),
],
"d": [
pd.Timestamp("2018-01-01"),
pd.Timestamp("2018-01-01"),
pd.Timestamp("2018-01-01"),
| pd.Timestamp("2018-01-01") | pandas.Timestamp |
#!/usr/licensed/anaconda3/2020.11/bin/python
base = "/home/jdh4/bin/gpus"
import sys
sys.path = list(filter(lambda p: p.startswith("/usr"), sys.path))
sys.path.append(base)
import json
import subprocess
import pandas as pd
from dossier import ldap_plus
if 1:
rows = []
with open(base + "/utilization.json") as fp:
for line in fp.readlines():
x = json.loads(line)
rows.append(list(x.values()))
df = pd.DataFrame(rows)
del rows
df.columns = ['timestamp', 'host', 'index', 'username', 'usage', 'jobid']
netids = list(df.username.drop_duplicates().values)
if "root" in netids: netids.remove("root")
if "OFFLINE" in netids: netids.remove("OFFLINE")
else:
cmd = "getent passwd | cut -d: -f1 | sort | uniq"
output = subprocess.run(cmd, capture_output=True, shell=True)
netids = output.stdout.decode("utf-8").split('\n')
netids.remove('')
netids.remove('+')
univ_info = ldap_plus(sorted(netids))
df = pd.DataFrame(univ_info[1:], columns=univ_info[0])
cols = ['NETID', 'POSITION', 'DEPT', 'NAME', 'SPONSOR']
df = df[cols]
df = df[ | pd.notna(df.POSITION) | pandas.notna |
import pandas as pd
import os
import string
import csv
import copy
import typing
from annotation.generation.annotation_to_template import run_wikifier as get_wikifier_result
from pathlib import Path
from collections import defaultdict
"""
How to use:
Step 1: load file
b. call "load_xlsx" with one file path
Step 2: generate file
call "generate" after you have loaded the file, passing
the output folder location and the column name config (optional) if needed
if the column name config is not given, or only partially given, the system will use:
Example file:
https://docs.google.com/spreadsheets/d/1NuTmRIxpy460S4CRdP6XORKFILssOby_RxiFbONXwv0/edit#gid=756069733
For attribute file:
Assume "Property" column exist and it is the node column
Assume "Attribute" column exist and it is the label column
For unit file:
Assume "Unit" column exist and it is the node column
Assume "Q-Node" column exist and it is the label column
"""
stop_punctuation = string.punctuation
TRANSLATOR = str.maketrans(stop_punctuation, ' ' * len(stop_punctuation))
# deprecated! not use
def load_csvs(dataset_file: str, attributes_file: str, units_file: str):
loaded_file = {}
files = [dataset_file, attributes_file, units_file]
file_type = ["dataset_file", "attributes_file", "units_file"]
for each_file, each_file_type in zip(files, file_type):
if each_file:
if not os.path.exists(each_file):
raise ValueError("{} {} not exist!".format(each_file_type, each_file))
loaded_file[each_file_type] = pd.read_csv(each_file)
return loaded_file
def load_xlsx(input_file: str, sheet_name_config: dict = None):
loaded_file = {}
sheet_names = pd.ExcelFile(input_file).sheet_names
if not sheet_name_config:
sheet_name_config = {"dataset_file": "Dataset",
"attributes_file": "Attributes",
"units_file": "Units",
"extra_edges": "Extra Edges"
}
for k, v in sheet_name_config.items():
if v not in sheet_names:
raise ValueError("Sheet name {} used for {} does not found!".format(v, k))
loaded_file[k] = pd.read_excel(input_file, v)
optional_sheet_name_config = {
"wikifier": "Wikifier",
"qualifiers": "Qualifiers",
"Wikifier_t2wml": "Wikifier_t2wml",
"Wikifier Columns": "Wikifier Columns"
}
for k, v in optional_sheet_name_config.items():
if v not in sheet_names:
loaded_sheet = None
else:
loaded_sheet = pd.read_excel(input_file, v)
loaded_file[k] = loaded_sheet
return loaded_file
def generate(loaded_file: dict, output_path: str = ".", column_name_config=None, to_disk=True,
datamart_properties_file: str = None, dataset_qnode: str = None, dataset_id: str = None,
debug: bool = False,
) -> typing.Optional[dict]:
"""
The main entry function for generating datamart files from template input,
based on the input parameter `to_disk`, the output is either None or a dict of dataframes
"""
if column_name_config is None:
column_name_config = {}
if "attributes_file_node_column_name" not in column_name_config:
column_name_config["attributes_file_node_column_name"] = "Property"
if "attributes_file_node_label_column_name" not in column_name_config:
column_name_config["attributes_file_node_label_column_name"] = "Attribute"
if "unit_file_node_column_name" not in column_name_config:
column_name_config["unit_file_node_column_name"] = "Q-Node"
if "unit_file_node_label_column_name" not in column_name_config:
column_name_config["unit_file_node_label_column_name"] = "Unit"
if len(loaded_file["dataset_file"]["dataset"].unique()) > 1:
raise ValueError("One dataset file should only contains 1 dataset ID in `dataset` column.")
if loaded_file["wikifier"] is not None:
extra_wikifier_dict = get_wikifier_part(loaded_file["wikifier"])
else:
extra_wikifier_dict = {}
# update 2020.7.22: accept user specified dataset id if given
if dataset_qnode is None:
dataset_qnode = loaded_file["dataset_file"]["dataset"].iloc[0]
if len(dataset_qnode) == 0 or not dataset_qnode[0] == 'Q':
raise Exception('First column of "Dataset" tab has to be Q-nodes')
if dataset_id is None:
# dataset_id = loaded_file["dataset_file"]["dataset"].iloc[0]
result = loaded_file['dataset_file']['node2'][loaded_file["dataset_file"]['label'] == 'P1813']
if len(result) == 0:
raise Exception('Missing dataset identifier. Missing "P1813" edge in "Dataset" tab')
else:
dataset_id = result.iloc[0]
if dataset_id[0] == '"' and dataset_id[-1] == '"':
dataset_id = dataset_id[1:-1]
# generate files
memo = defaultdict(dict)
kgtk_properties_df = _generate_KGTK_properties_file(loaded_file["attributes_file"],
loaded_file["qualifiers"],
dataset_qnode, dataset_id,
memo, column_name_config["attributes_file_node_column_name"],
column_name_config["attributes_file_node_label_column_name"])
kgtk_variables_df = _generate_KGTK_variables_file(loaded_file["attributes_file"],
dataset_qnode, dataset_id, memo,
column_name_config["attributes_file_node_column_name"],
column_name_config["attributes_file_node_label_column_name"])
kgtk_units_df = _generate_KGTK_units_file(loaded_file["units_file"], dataset_qnode, memo,
column_name_config["unit_file_node_column_name"],
column_name_config["unit_file_node_label_column_name"])
wikifier_df = _generate_wikifier_file(memo, extra_wikifier_dict)
if loaded_file["Wikifier_t2wml"] is not None:
wikifier_df = pd.concat([wikifier_df, loaded_file["Wikifier_t2wml"]])
dataset_df = _generate_dataset_file(loaded_file["dataset_file"])
extra_edges_df = _generate_extra_edges_file(loaded_file["extra_edges"], memo)
output_files = {"kgtk_properties.tsv": kgtk_properties_df,
"kgtk_variables.tsv": kgtk_variables_df,
"kgtk_units.tsv": kgtk_units_df,
"wikifier.csv": wikifier_df,
"extra_edges.tsv": extra_edges_df,
"dataset.tsv": dataset_df}
if datamart_properties_file is not None:
datamart_schema_df = pd.read_csv(datamart_properties_file, sep='\t')
output_files['datamart_schema_properties.tsv'] = datamart_schema_df
# save to disk if required or running in debug mode
if to_disk or debug:
os.makedirs(output_path, exist_ok=True)
for each_file_name, each_file in output_files.items():
output_file_path = os.path.join(output_path, each_file_name)
if each_file_name.endswith(".csv"):
each_file.to_csv(output_file_path, index=False)
elif each_file_name.endswith(".tsv"):
each_file.to_csv(output_file_path, sep='\t', index=False, quoting=csv.QUOTE_NONE)
if not to_disk:
return output_files
def _generate_KGTK_properties_file(input_df: pd.DataFrame, qualifier_df: pd.DataFrame,
dataset_q_node: str, dataset_id: str, memo: dict,
node_column_name="Property", node_label_column_name="Attribute",
qualifier_column_name="Qualifiers") -> pd.DataFrame:
"""
sample format for each property (totally 3 rows)
Please note that data type may change (to String, Date) base on the given input template file
id node1 label node2
0 Paid-security-002-data_type Paid-security-002 data_type Quantity
1 Paid-security-002-P31 Paid-security-002 P31 Q18616576
2 Paid-security-002-label Paid-security-002 label UN
:return: kgtk format property dataframe
"""
node_number = 1
output_df_list = []
input_df = input_df.fillna("")
has_relationship = 'Relationship' in input_df.columns and 'Role' in input_df.columns
for _, each_row in input_df.iterrows():
node_number += 1
if has_relationship:
role = each_row["Role"].upper()
else:
role = ""
if each_row[node_column_name] == "":
node_label = to_kgtk_format_string(each_row[node_label_column_name])
node_id = _generate_p_nodes(role, dataset_q_node, node_number, memo, each_row['Attribute'])
# get type if specified
if "type" in each_row:
value_type = each_row["type"]
else:
value_type = "Quantity"
labels = ["wikidata_data_type", "data_type", "P31", "label"]
node2s = [value_type, value_type, "Q18616576", node_label]
for i in range(len(labels)):
id_ = "{}-{}".format(node_id, labels[i])
output_df_list.append({"id": id_, "node1": node_id, "label": labels[i], "node2": node2s[i]})
else:
node_id = each_row[node_column_name]
# add to memo for future use
memo["property"][node_id] = each_row[node_label_column_name]
if "Role" in each_row:
memo["property_role"][node_id] = each_row["Role"].lower()
# add qualifier part if we have
if qualifier_df is not None:
qualifier_df = qualifier_df.fillna("")
for _, each_row in qualifier_df.iterrows():
node_number += 1
if each_row[node_column_name] == "":
node_id = _generate_p_nodes("QUALIFIER", dataset_q_node, node_number, memo, each_row["Attribute"])
memo["qualifier_target_nodes"][each_row[qualifier_column_name]] = memo["property_name_to_id"][
each_row[node_label_column_name]]
memo["qualifier_name_to_id"][each_row[qualifier_column_name]] = node_id
memo["property"][node_id] = each_row[qualifier_column_name]
labels = ["data_type", "P31", "label"]
node2s = ["String", "Q18616576", to_kgtk_format_string(each_row[qualifier_column_name])]
for i in range(3):
id_ = "{}-{}".format(node_id, labels[i])
output_df_list.append({"id": id_, "node1": node_id, "label": labels[i], "node2": node2s[i]})
else:
memo["property"][each_row[node_column_name]] = each_row[qualifier_column_name]
memo["qualifier_name_to_id"][each_row[qualifier_column_name]] = each_row[node_column_name]
memo["qualifier_target_nodes"][each_row[qualifier_column_name]] = memo["property_name_to_id"][
each_row[node_label_column_name]]
# get output
output_df = pd.DataFrame(output_df_list)
# in case of empty df
if output_df.shape == (0, 0):
output_df = pd.DataFrame(columns=['id', 'node1', 'label', 'node2'])
return output_df
def _generate_KGTK_variables_file(input_df: pd.DataFrame, dataset_q_node: str, dataset_id: str, memo: dict,
node_column_name="Property", node_label_column_name="Attribute"):
"""
sample format for each variable, totally 10 + n (n is the count of related qualifiers) rows
"id" "node1" "label" "node2"
0 QVARIABLE-OECD-002-label QVARIABLE-002 label "GDP per capita"
1 QVARIABLE-OECD-002-P1476 QVARIABLE-002 P1476 "GDP per capita"
2 QVARIABLE-OECD-002-description QVARIABLE-002 description "GDP per capita variable in OECD"
3 QVARIABLE-OECD-002-P31-1 QVARIABLE-002 P31 Q50701
4 QVARIABLE-OECD-002-P2006020002-P248 QVARIABLE-002 P2006020002 P585
5 QVARIABLE-OECD-002-P2006020002-P248 QVARIABLE-002 P2006020002 P248
6 QVARIABLE-OECD-002-P1687-1 QVARIABLE-002 P1687 PVARIABLE-OECD-002
7 QVARIABLE-OECD-002-P2006020004-1 QVARIABLE-002 P2006020004 QOECD
8 QVARIABLE-OECD-002-P1813 QVARIABLE-002 P1813 "gdp_per_capita"
9 QVARIABLE-OECD-P2006020003-QOECD002 QVARIABLE P2006020003 QOECD-002
-------------------------------------------------
10 QVARIABLE-OECD-P2006020002-PQUALIFIER-OECD-101 QVARIABLE P2006020003 PQUALIFIER-OECD-101
11 QVARIABLE-OECD-P2006020002-PQUALIFIER-OECD-102 QVARIABLE P2006020003 PQUALIFIER-OECD-102
-------------------------------------------------
12 ... QVARIABLE-002 P2010050001 FactorClass:EconomicAgricuturalCapability
13 ... QVARIABLE-002 P2010050001 Relevance:1
"""
node_number = 1
output_df_list = []
short_name_memo = set()
input_df = input_df.fillna("")
all_qualifier_properties = []
for node, role in memo["property_role"].items():
if role == "qualifier":
all_qualifier_properties.append(node)
has_relationship = 'Relationship' in input_df.columns and 'Role' in input_df.columns
for _, each_row in input_df.iterrows():
if has_relationship:
role = each_row["Role"].upper()
else:
role = ""
# not add QUALIFIER to variables tab
if has_relationship and role == "QUALIFIER":
continue
target_properties = []
# update 2020.7.22: consider role and relationship for new template file
if has_relationship:
relations = each_row['Relationship']
# qualifier should not have qualifier properties
if each_row['Role'].lower() != "qualifier":
if relations == "":
target_properties = all_qualifier_properties
else:
for each_relation in relations.split("|"):
if each_relation not in memo["property_name_to_id"]:
raise ValueError(
"Annotation specify variable {} not exist in input data.".format(each_relation))
target_properties.append(memo["property_name_to_id"][each_relation])
node_number += 1
if each_row[node_column_name] == "":
# update 2020.7.23, also add role for P nodes
p_node_id = _generate_p_nodes(role, dataset_q_node, node_number, memo, each_row["Attribute"])
else:
p_node_id = each_row[node_column_name]
# update 2020.7.22: change to add role in Q node id
q_node_id = _generate_q_nodes(role, dataset_q_node, node_number)
memo["variable"][q_node_id] = each_row[node_label_column_name]
fixed_labels = ["label", "P1476", "description", # 1-3
"P31", "P2006020002", "P2006020002", # 4-6
"P1687", "P2006020004", "P1813", # 7-9
"P2006020003"]
labels = fixed_labels + len(target_properties) * ["P2006020002"]
if each_row['label'] == "":
node2_label = to_kgtk_format_string(each_row[node_label_column_name])
else:
node2_label = to_kgtk_format_string(each_row['label'])
if each_row['description'] == "":
node2_description = to_kgtk_format_string("{} in {}".format(each_row[node_label_column_name], dataset_id))
else:
node2_description = to_kgtk_format_string(each_row['description'])
node2s = [node2_label, # to_kgtk_format_string(each_row[node_label_column_name]), # 1
node2_label, # to_kgtk_format_string(each_row[node_label_column_name]), # 2
node2_description,
# to_kgtk_format_string("{} in {}".format(each_row[node_label_column_name], dataset_id)), # 3
"Q50701", "P585", "P248", # 4(Q50701 = variable), 5(P585 = Point in time), 6(P249 = stated in)
p_node_id, # 7
dataset_q_node, # 8
to_kgtk_format_string(get_short_name(short_name_memo, each_row[node_label_column_name])), # 9
q_node_id # 10
] + target_properties
node1s = [q_node_id] * (len(fixed_labels) - 1) + [dataset_q_node] + [q_node_id] * len(target_properties)
# Add tag edges
if 'tag' in input_df.columns and each_row['tag']:
tag_values = [to_kgtk_format_string(x) for x in each_row['tag'].split('|')]
node1s += [q_node_id] * len(tag_values)
labels += ['P2010050001'] * len(tag_values)
node2s += tag_values
# add those nodes
for i, each_label in enumerate(labels):
id_ = _generate_edge_id(node1s[i], labels[i], node2s[i])
output_df_list.append({"id": id_, "node1": node1s[i], "label": labels[i], "node2": node2s[i]})
# get output
output_df = pd.DataFrame(output_df_list)
# in case of empty df
if output_df.shape == (0, 0):
output_df = pd.DataFrame(columns=['id', 'node1', 'label', 'node2'])
return output_df
def _generate_KGTK_units_file(input_df: pd.DataFrame, dataset_q_node: str, memo: dict, node_column_name="Q-Node",
node_label_column_name="Unit") -> pd.DataFrame:
"""
sample format for each unit (totally 2 rows)
id node1 label node2
0 QUNIT-aid-security-U002-label Qaid-security-U002 label person
1 QUNIT-aid-security-U002-P31 Qaid-security-U002 P31 Q47574
:return:
"""
node_number = 1
count = 0
output_df_dict = {}
input_df = input_df.fillna("")
for _, each_row in input_df.iterrows():
node_number += 1
if each_row[node_column_name] == "":
# update 2020.7.22: change to use QUNIT* instead of Q*
node_id = _generate_q_nodes("UNIT", dataset_q_node, node_number)
labels = ["label", "P31"]
node2s = [to_kgtk_format_string(each_row[node_label_column_name]), "Q47574"]
memo["unit"][node_id] = each_row[node_label_column_name]
for i in range(2):
id_ = _generate_edge_id(node_id, labels[i], node2s[i])
output_df_dict[count] = {"id": id_, "node1": node_id, "label": labels[i], "node2": node2s[i]}
count += 1
else:
memo["unit"][each_row[node_column_name]] = each_row[node_label_column_name]
# get output
output_df = pd.DataFrame.from_dict(output_df_dict, orient="index")
# in case of empty df
if output_df.shape == (0, 0):
output_df = pd.DataFrame(columns=['id', 'node1', 'label', 'node2'])
return output_df
def _generate_wikifier_file(memo, extra_wikifier_dict):
"""
generate the wikifier part from template(those properties, variables, units generated in above functions)
Sample file looks like:
column row value context item
0 "" "" UN property Paid-security-002
1 "" "" INGO property Paid-security-003
2 "" "" LNGO/NRCS property Paid-security-004
3 "" "" ICRC property Paid-security-005
4 "" "" UN variable Qaid-security-002
5 "" "" INGO variable Qaid-security-003
6 "" "" person unit Qaid-security-U002
"""
output_df_list = []
for memo_type, each_memo in memo.items():
if memo_type in {"property", "unit", "variable"}:
for node, label in each_memo.items():
output_df_list.append({"column": "", "row": "", "value": label, "context": memo_type, "item": node})
# for those specific alias of wikifier names
combo = (label, memo_type)
if combo in extra_wikifier_dict:
output_df_list.append(
{"column": "", "row": "", "value": extra_wikifier_dict[combo], "context": memo_type,
"item": node})
# get output
output_df = pd.DataFrame(output_df_list)
return output_df
def _generate_dataset_file(input_df: pd.DataFrame):
"""
A sample dataset file looks like:
node1 label node2 id
Qaid-security P31 Q1172284 aid-security-P31
Qaid-security label aid-security dataset aid-security-label
Qaid-security P1476 aid-security dataset aid-security-P1476
Qaid-security description aid-security dataset aid-security-description
Qaid-security P2699 aid-security aid-security-P2699
Qaid-security P1813 aid-security aid-security-P1813
:return:
"""
output_df = copy.deepcopy(input_df)
ids = []
for _, each_row in output_df.iterrows():
ids.append("{}-{}".format(each_row["dataset"], each_row["label"]))
output_df['id'] = ids
# Assume the the first column are already Q nodes
# output_df["dataset"] = output_df['dataset'].apply(lambda x: "Q" + x)
output_df = output_df.rename(columns={"dataset": "node1"})
# check double quotes
output_df = _check_double_quotes(output_df, check_content_startswith=True)
return output_df
def _generate_extra_edges_file(input_df: pd.DataFrame, memo: dict):
qualifier_extra_edges_list = []
if "qualifier_target_nodes" in memo:
for k, v in memo['qualifier_target_nodes'].items():
qualifier_extra_edges_list.append({"id": "", "node1": v, "label": "P2006020002",
"node2": memo["qualifier_name_to_id"][k]})
output_df = pd.concat([input_df, pd.DataFrame(qualifier_extra_edges_list)])
# check double quotes
output_df = _check_double_quotes(output_df, label_types={"label", "description"})
return output_df
# update 2020.7.24, add support of run wikifier and record t2wml wikifier file in template
def run_wikifier(input_folder_path: str, wikifier_columns_df: pd.DataFrame, template_output_path: str):
"""
run wikifier on all table files(csv, xlsx, xls) and add the new wikifier results to "wikifier.csv" file
:param input_folder_path:
:param wikifier_columns_df:
:param template_output_path:
:return:
"""
new_wikifier_df_list = []
input_data = []
for each_file in os.listdir(input_folder_path):
if each_file.startswith("~") or each_file.startswith("."):
continue
each_file = os.path.join(input_folder_path, each_file)
if each_file.endswith(".csv"):
input_data.append(pd.read_csv(each_file, header=None))
elif each_file.endswith(".xlsx") or each_file.endswith("xls"):
for each_sheet in get_sheet_names(each_file):
input_data.append(pd.read_excel(each_file, each_sheet, header=None))
for each_df in input_data:
each_df = each_df.fillna("")
# get only data part that need to be parsed
for _, each_row in wikifier_columns_df.iterrows():
target_column_number = ord(each_row['Columns']) - ord("A")
start_row, end_row = each_row["Rows"].split(":")
start_row = int(start_row) - 1
if end_row == "":
end_row = len(each_df)
else:
end_row = int(end_row)
if target_column_number >= each_df.shape[1] or end_row > each_df.shape[0]:
print("Required to wikify on column No.{} and end row at {} but the input dataframe shape is only {}". \
format(target_column_number, end_row, each_df.shape))
continue
each_df = each_df.iloc[start_row:end_row, :]
# run wikifier
new_wikifier_df_list.extend(get_wikifier_result(input_df=each_df, target_col=target_column_number,
wikifier_type="country"))
wikified_values = set([each["value"] for each in new_wikifier_df_list])
remained_df_part = each_df[~each_df.iloc[:, target_column_number].isin(wikified_values)]
new_wikifier_df_list.extend(get_wikifier_result(input_df=remained_df_part, target_col=target_column_number,
wikifier_type="ethiopia"))
new_wikifier_df = pd.DataFrame(new_wikifier_df_list)
# combine the previous wikifier file if exists
output_wikifier_file_path = os.path.join(template_output_path, "wikifier.csv")
if os.path.exists(output_wikifier_file_path):
new_wikifier_df = pd.concat([pd.read_csv(output_wikifier_file_path), new_wikifier_df])
# save to disk
new_wikifier_df.to_csv(output_wikifier_file_path, index=False)
def get_short_name(short_name_memo, input_str):
words_processed = str(input_str).lower().translate(TRANSLATOR).split()
short_name = "_".join(words_processed)
if short_name[0].isnumeric():
short_name = "_" + short_name
i = 0
while short_name in short_name_memo:
i += 1
short_name = "_".join(words_processed) + "_{}".format(i)
short_name_memo.add(short_name)
return short_name
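# Quick check mirroring the "gdp_per_capita" sample shown in the variables-file
# docstring above: punctuation is stripped and words are joined by underscores.
assert get_short_name(set(), "GDP per capita") == "gdp_per_capita"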
def get_wikifier_part(wikifier_input_df: pd.DataFrame):
result = {}
for _, each_row in wikifier_input_df.iterrows():
result[(each_row["attribute"], each_row["context"])] = (each_row["value"])
return result
def get_sheet_names(file_path):
"""
This function returns the sheet names of the given excel file
:param file_path:
:return:
"""
file_extension = Path(file_path).suffix
is_csv = True if file_extension.lower() == ".csv" else False
if is_csv:
return [Path(file_path).name]
xl = | pd.ExcelFile(file_path) | pandas.ExcelFile |
# ==============================================================================
# This file defines a class that contains a method that takes in a
# file name and then using delta loads the wave files for that subject.
#
# Based on a configuration dictionary that is provided to the method it
# will segregate the files up into a train and test set.
#
# author: <NAME>
# ==============================================================================
import pandas as pd
import delta.compat as tf
from pathlib import Path
from delta.data.frontend.read_wav import ReadWav
# ==============================================================================
# Description:
# This method takes in a file name and a set of configuration data and loads
# the wav file.
#
# author: <NAME>
#
# inputs:
# fileName - The file name of the wav file to load.
# func_config - A dictionary containing settings.
#
# output:
# The loaded wav object
# ==============================================================================
def LoadWavFile(fileName, func_config):
config = {'sample_rate': func_config['sample_rate']}
read_wav = ReadWav.params(config).instantiate()
data, srate = read_wav(fileName)
return data
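# Illustrative call (file name and rate are placeholders, not project assets):
# LoadWavFile("example.wav", {"sample_rate": 16000}) returns the samples decoded
# by delta's ReadWav frontend.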
# ==============================================================================
# This method loads wave files into a pandas array.
#
# Based on a configuration dictionary that is provided to the method it
# will segregate the files up into a train and test set.
#
# author: <NAME>
# ==============================================================================
# ==============================================================================
# Description:
# This method loads the subject numbers from a file and then loads all of
# the requested files for each of the listed subjects into a DataFrame.
#
# author: <NAME>
#
# inputs:
# fileName - The file containing the subject numbers to load.
# func_config - A dictionary containing settings.
#
# output:
# A data frame containing the subject ID, sample rate of the file and the
# wav file loaded.
# ==============================================================================
def LoadSubjects(fileName, func_config):
dfData = | pd.DataFrame({}, columns=['Subject', 'sample_rate', 'wavFile']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Dirichlet Mixing Module v1.2
# Implemented by <NAME>, based on original MatLab code by <NAME>.
# Mathematics described in Rudge et al.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Mean composition of melts from all lithologies
def mean_comp_total(f,w,c):
return np.sum(f*c*w)
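# Illustrative check with made-up fractions, weights and concentrations: the
# mean composition is the weighted sum over lithologies, sum(f*c*w).
f_demo, w_demo, c_demo = np.array([0.1, 0.2]), np.array([0.5, 0.5]), np.array([10.0, 20.0])
assert np.isclose(mean_comp_total(f_demo, w_demo, c_demo), 0.1*10.0*0.5 + 0.2*20.0*0.5)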
# Import a Melts output file
def Import_Melts_output(filename,dX=0.0001,RudgeMethod=False):
"""Import a Melts csv file and recasts the melting column in terms of equal
dX increments. Converts pressure from bars to GPa.
Returns a pandas dataframe.
Parameters
----------
filename: string
Name of the file (and path relative to script) to import
dX: float
Discretization interval. Default 0.01%.
RudgeMethod: bool
Use the Rudge method for calculating melt fraction from the MELTS input.
I think this method is erroneous, but have kept it so his results may be
reproduced.
"""
meltsFile = pd.read_csv(filename,skiprows=1)
if RudgeMethod == True:
# Calculate Melt Fraction
meltsFile['F'] = (100-meltsFile.Mass)/100
# Calculate residual porosity
ResidualPorosity = meltsFile.F.iloc[0]
X = (meltsFile.F - ResidualPorosity)/(1-ResidualPorosity)
else:
X = (meltsFile.Mass[0]-meltsFile.Mass)/meltsFile.Mass[0]
# Find last X=0 term during upwelling (and first DeltaX>0 term)
# Find last melting step
MeltingBounds = [0,0]
MeltingBounds[0] = np.argmin(X[X>0]) - 1
MeltingBounds[1] = np.argmax(X)
# Make list of columns for new array
columns = ['X','P','T']
columns = columns + (meltsFile.columns[3:].tolist())
# Set up list of X values to map all other variables to
X_to_map = np.arange(X[MeltingBounds[0]],X[MeltingBounds[1]],dX)
# Create an array of zeroes with the length of the number of rows needed in the dataframe
EmptyColumns = np.array([np.zeros(np.shape(X_to_map))]*np.shape(columns)[0]).T
# Create Empty Dataframe
d = pd.DataFrame(EmptyColumns, columns=columns)
# Start filling Dataframe
d.X = X_to_map
d['T'] = np.interp(d.X,X,meltsFile.Temperature)
d['P'] = np.interp(d.X,X,meltsFile.Pressure)/1e4
# Map all the chemistry to the new X variable
for col in columns[3:]:
d[col] = np.interp(d.X,X,meltsFile[col])
return d
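# Hedged usage sketch (the MELTS csv path below is a placeholder):
# melts_col = Import_Melts_output("melts_output.csv", dX=0.0001)
# melts_col then holds columns X, P, T plus the interpolated chemistry columns.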
# Import a Melts output file
def Import_Katz_output(filename,dX=0.0001,MajorElements=pd.Series([7.48,8.51],index=['MgO','FeO']),WalterComps=False,file=True):
"""Import a numpy file generated by the single melting region function of
the DualLithologyMelting script and recasts the melting column in terms of equal
dX increments.
Returns a pandas dataframe.
Parameters
----------
filename: string
Name of the file (and path relative to script) to import
dX: float
Discretization interval. Default 0.01%.
MajorElements: series
Major Element concentrations to add to each fractional melt. Same composition
will apply to all melts. MgO and FeO must always be set, otherwise some
functionality of the dirichlet module won't work (but can be ignored).
WalterComps: bool
If true the major element composition of the melts will be calculated using
the parameterisation of the Walter KR4003 melting experiments by Duncan et al.
(2017).
file: bool
If true filename is interpreted as a file name, if false, filename is interpreted
as the array object itself.
"""
if file == True:
mr_raw = np.load(filename)
else:
mr_raw = filename
mr = np.zeros([3,np.shape(mr_raw)[1]])
mr[0] = mr_raw[0]
mr[1] = mr_raw[1]
mr[2] = mr_raw[3]
mr = pd.DataFrame(mr.T,columns=['P','T','X'])
# Find last X=0 term during upwelling (and first DeltaX>0 term)
# Find last melting step
MeltingBounds = [0,0]
MeltingBounds[0] = np.argmin(mr.X[mr.X>0]) - 1
MeltingBounds[1] = np.argmax(mr.X)
# Make list of columns for new array
columns = ['X','P','T']
if WalterComps == False:
columns = columns + MajorElements.index.tolist()
# Set up list of X values to map all other variables to
X_to_map = np.arange(mr.X[MeltingBounds[0]],mr.X[MeltingBounds[1]],dX)
# Create an array of zeroes with the length of the number of rows needed in the dataframe
EmptyColumns = np.array([np.zeros(np.shape(X_to_map))]*np.shape(columns)[0]).T
# Create Empty Dataframe
d = | pd.DataFrame(EmptyColumns, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.display import HTML
def data():
df = pd.read_stata('data, R and html files/1section.dta')
df['loansgdp'] = df.tloans/df.gdp
return df.head()
def fig21():
# dynamics of the loans-to-GDP ratio over time for USA, Germany, Australia, France, UK and Spain
df = pd.read_stata('data, R and html files/1section.dta')
usa = df[df.country == 'USA']
ger = df[df.country == 'Germany']
aus = df[df.country == 'Australia']
fr = df[df.country == 'France']
uk = df[df.country == 'UK']
sp = df[df.country == 'Spain']
fig = plt.figure(figsize=(10,5))
plt.suptitle('Figure 2.1 Ratio of private loans to GDP in selected developed economies')
plt.subplot(2, 3, 1)
plt.plot(usa.year, usa.loansgdp, color='c')
plt.title('USA')
plt.subplot(2, 3, 2)
plt.plot(ger.year, ger.loansgdp, color='c')
plt.title('Germany')
plt.subplot(2, 3, 3)
plt.plot(aus.year, aus.loansgdp, color='c')
plt.title('Australia')
plt.subplot(2, 3, 4)
plt.plot(fr.year, fr.loansgdp, color='c')
plt.title('France', y=-0.35)
plt.subplot(2, 3, 5)
plt.plot(uk.year, uk.loansgdp, color='c')
plt.title('UK', y=-0.35)
plt.subplot(2, 3, 6)
plt.plot(sp.year, sp.loansgdp, color='c')
plt.title('Spain', y=-0.35)
fig.text(0.05,-0.08,'Source: Jorda et al.(2013) set');
plt.show()
def prep1():
#preparation of excess credit dummy
df = pd.read_stata('data, R and html files/1section.dta')
df['loansgdp'] = df.tloans/df.gdp
df['diff_loansgdp'] = 100*df['loansgdp'].diff()
loans_table = df.pivot_table(values = 'diff_loansgdp', index = 'country', columns = 'year')
pd.options.display.float_format = '{:,.3f}'.format
mean_year = loans_table.mean()
mean_all = mean_year.mean()
df['excredit'] = (df['diff_loansgdp'] > mean_all).astype(int)
#data preparation
df['year'] = df['year'].astype('int')
df['pk_fin'] = df['pk_fin'].astype('int')
df['pk_norm'] = df['pk_norm'].astype('int')
df['lrgdp'] = np.log(df.rgdp)
#copy to the file for R
df.to_stata('data, R and html files/1section1.dta')
df = df.loc[(df['pk_fin'] == 1) & (df['excredit'] == 1)]
df = df[['year','country', 'pk_norm', 'pk_fin', 'excredit', 'pop', 'gdp', 'rgdp', 'lrgdp', 'tloans', 'loansgdp', 'diff_loansgdp', 'dlcpi', 'dlriy', 'stir', 'ltrate', 'ldlrgdp', 'ldlcpi', 'ldlriy', 'lstir', 'lltrate']]
return df.head()
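# Illustrative only: a toy version (made-up numbers) of the excess-credit flag built in
# prep1() above. Here the threshold is simply the overall mean change; prep1() averages
# the annual means of a country-by-year pivot table before flagging.
def _example_excess_credit_flag():
    toy = pd.DataFrame({'country': ['A', 'A', 'B', 'B'],
                        'year': [2000, 2001, 2000, 2001],
                        'diff_loansgdp': [0.5, 3.0, 1.0, 0.2]})
    threshold = toy['diff_loansgdp'].mean()
    toy['excredit'] = (toy['diff_loansgdp'] > threshold).astype(int)
    return toy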
def data2():
#obtaining the data
df = pd.read_stata('data, R and html files/2section.dta')
pd.set_option('display.float_format', lambda x: '%.3f' % x)
df['year'] = df['year'].astype('int')
df = df[['year', 'state', 'stateid', 'Total_GSP', 'cpi', 'population', 'spend', 'employ_CES', 'Dcpi_ACCRA']]
#state output
#output in millions
df['out'] = df.Total_GSP*1000000
#real output
df['rout'] = df.out/df.cpi
#real output per capita
df['rcapout'] = df.rout/df.population
#percent change of real output per capita
df['Drcapout'] = df.sort_values(['year']).groupby('state')['rcapout'].pct_change()
#state spending
#real spending
df['rspend'] = df.spend/df.cpi
#real spending per capita
df['rcapspend'] = df.rspend/df.population
#real p.c.spending change as % of real p.c. output
df['rcapspend_lag'] = df.sort_values('year').groupby(['state'])['rcapspend'].shift(1)
df['rcapout_lag'] = df.sort_values('year').groupby(['state'])['rcapout'].shift(1)
df['Drcapspend'] = (df.rcapspend - df.rcapspend_lag)/df.rcapout_lag
#state employment
#employment in thousands
df['emp'] = df.employ_CES*1000
#% change in employment rate
df['emp_lag'] = df.sort_values('year').groupby(['state'])['emp'].shift(1)
df['population_lag'] = df.sort_values('year').groupby(['state'])['population'].shift(1)
df['Demp'] = (df.emp/df.population - df.emp_lag/df.population_lag)/(df.emp_lag/df.population_lag)
#state population
#% change of population
df['Dpop'] = (df.population-df.population_lag)/df.population_lag
#aggregate population
df1 = df.groupby('year')['population'].sum()
df1 = df1.to_frame().reset_index()
df1 = pd.concat([df1]*51)
df1 = df1.drop(columns="year")
df1 = df1.to_numpy()
df['population_nat'] = df1
#aggregate output
#aggregation of real output
df1 = df.groupby('year')['rout'].sum()
df1 = df1.to_frame().reset_index()
df1 = pd.concat([df1]*51)
df1 = df1.drop(columns="year")
df1 = df1.to_numpy()
df['rout_nat'] = df1
#agg real output per capita
df['rcapout_nat'] = df.rout_nat/df.population_nat
#percent change of real output per capita
df['Drcapout_nat'] = df.sort_values(['year']).groupby('state')['rcapout_nat'].pct_change()
#aggregate spending
#aggregation of real spending
summ_spend = df.groupby('year')['rspend'].sum()
summ_spend = summ_spend.to_frame().reset_index()
df1 = pd.concat([summ_spend]*51)
df1 = df1.drop(columns="year")
df1 = df1.to_numpy()
df['rspend_nat'] = df1
#total national real spending per capita
df['rcapspend_nat'] = df.rspend_nat/df.population_nat
#change of national real (work) p.c. spending as % of real national output p.c.
df['rcapspend_nat_lag'] = df.sort_values('year').groupby(['state'])['rcapspend_nat'].shift(1)
df['rcapout_nat_lag'] = df.sort_values('year').groupby(['state'])['rcapout_nat'].shift(1)
df['Drcapspend_nat'] = (df.rcapspend_nat - df.rcapspend_nat_lag)/df.rcapout_nat_lag
df2 = df[['year', 'state', 'Drcapout', 'Drcapspend', 'Drcapspend_nat', 'Demp', 'Dpop']]
df2.to_stata('data, R and html files/2section2.dta')
return df2.head()
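# Illustrative only: the one-period within-state lag pattern used throughout data2(),
# shown on made-up data -- sort by year, group by state, then shift by one row.
def _example_state_lag():
    toy = pd.DataFrame({'state': ['CA', 'CA', 'IL', 'IL'],
                        'year': [2000, 2001, 2000, 2001],
                        'rcapspend': [10.0, 12.0, 8.0, 9.0]})
    toy['rcapspend_lag'] = toy.sort_values('year').groupby('state')['rcapspend'].shift(1)
    return toy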
def fig31():
#obtaining the data
df = pd.read_stata('data, R and html files/2section.dta')
pd.set_option('display.float_format', lambda x: '%.3f' % x)
df['year'] = df['year'].astype('int')
df = df[['year', 'state', 'stateid', 'Total_GSP', 'cpi', 'population', 'spend', 'employ_CES', 'Dcpi_ACCRA']]
#state output
#output in millions
df['out'] = df.Total_GSP*1000000
#real output
df['rout'] = df.out/df.cpi
#state spending
#real spending
df['rspend'] = df.spend/df.cpi
#real spending per capita
#data for states
states = df[['year', 'state', 'rspend', 'rout']]
IL = states[states.state == 'IL']
CA = states[states.state == 'CA']
#aggregate data
summsp = df.groupby('year')['rspend'].sum()
summsp = summsp.to_frame().reset_index()
summout = df.groupby('year')['rout'].sum()
summout = summout.to_frame().reset_index()
summout = summout.drop(columns="year")
summout = summout.to_numpy()
summsp['rout'] = summout
summsp['state'] = "All"
#merge 3 datasets
st = | pd.concat([IL, CA]) | pandas.concat |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
import seaborn
import scipy.io as scio
def load_dataset():
breastw_path = 'duibi_dataset/breastw.mat'
data = scio.loadmat(breastw_path)
data_train_data = data.get('X') # 取出字典里的data
data_train_label = data.get('y') # 取出字典里的label
ccdata = | pd.DataFrame(data_train_data) | pandas.DataFrame |
from arche import SH_URL
from arche.readers.items import Items, CollectionItems, JobItems
from conftest import Collection, Job
import numpy as np
import pandas as pd
import pytest
@pytest.mark.parametrize(
"df, expected_raw, expected_df",
[
(pd.DataFrame({"0": [0]}), [{"0": 0}], pd.DataFrame({"0": [0]})),
(
pd.DataFrame({"0": [0], "_key": ["0"]}),
[{"0": 0, "_key": "0"}],
| pd.DataFrame({"0": [0], "_key": ["0"]}) | pandas.DataFrame |
import sys
sys.path.append('../mss')
import matplotlib.pyplot as plt
import visreader as mvis
import mssmain as mss
import pandas as pd
import numpy as np
from tqdm import tqdm
def mz_selector(scans, mz_list, export_name):
sample_list = []
for mz in tqdm(mz_list):
rt, i = ms_chromatogram_list(scans, mz, 20)
count = 0
for ints in i:
if ints >= 5000:
count += 1
if count == 7:
sample_list.append([mz, i])
break
else:
count = 0
continue
d_sample = | pd.DataFrame(sample_list) | pandas.DataFrame |
import unittest
from unittest.mock import patch, PropertyMock
import time
import mt5_correlation.correlation as correlation
import pandas as pd
from datetime import datetime, timedelta
from test_mt5 import Symbol
import random
import os
class TestCorrelation(unittest.TestCase):
# Mock symbols. 4 Symbols, 3 visible.
mock_symbols = [Symbol(name='SYMBOL1', visible=True),
Symbol(name='SYMBOL2', visible=True),
Symbol(name='SYMBOL3', visible=False),
Symbol(name='SYMBOL4', visible=True),
Symbol(name='SYMBOL5', visible=True)]
# Start and end date for price data and mock prices: base; correlated; and uncorrelated.
start_date = None
end_date = None
price_columns = None
mock_base_prices = None
mock_correlated_prices = None
mock_uncorrelated_prices = None
def setUp(self):
"""
        Creates some price data for use in tests.
:return:
"""
        # Start and end date for price data and mock price dataframes: base; correlated; uncorrelated;
        # correlated with different dates; and inversely correlated.
self.start_date = datetime(2021, 1, 1, 1, 5, 0)
self.end_date = datetime(2021, 1, 1, 11, 30, 0)
self.price_columns = ['time', 'close']
self.mock_base_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_uncorrelated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_different_dates = pd.DataFrame(columns=self.price_columns)
self.mock_inverse_correlated_prices = pd.DataFrame(columns=self.price_columns)
# Build the price data for the test. One price every 5 minutes for 500 rows. Base will use min for price,
# correlated will use min + 5 and uncorrelated will use random
for date in (self.start_date + timedelta(minutes=m) for m in range(0, 500*5, 5)):
self.mock_base_prices = self.mock_base_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute]]))
self.mock_correlated_prices = \
self.mock_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute + 5]]))
self.mock_uncorrelated_prices = \
self.mock_uncorrelated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, random.randint(0, 1000000)]]))
self.mock_correlated_different_dates = \
self.mock_correlated_different_dates.append(pd.DataFrame(columns=self.price_columns,
data=[[date + timedelta(minutes=100),
date.minute + 5]]))
self.mock_inverse_correlated_prices = \
self.mock_inverse_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, (date.minute + 5) * -1]]))
@patch('mt5_correlation.mt5.MetaTrader5')
def test_calculate(self, mock):
"""
Test the calculate method. Uses mock for MT5 symbols and prices.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Correlation class
cor = correlation.Correlation(monitoring_threshold=1, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
# We don't have a SYMBOL3 as this is set as not visible. Correlations should be as follows:
# SYMBOL1:SYMBOL2 should be fully correlated (1)
# SYMBOL1:SYMBOL4 should be uncorrelated (0)
# SYMBOL1:SYMBOL5 should be negatively correlated
# SYMBOL2:SYMBOL5 should be negatively correlated
# We will not use p_value as the last set uses random numbers so p value will not be useful.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_uncorrelated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# Test the output. We should have 6 rows. S1:S2 c=1, S1:S4 c<1, S1:S5 c=-1, S2:S5 c=-1. We are not checking
# S2:S4 or S4:S5
self.assertEqual(len(cor.coefficient_data.index), 6, "There should be six correlations rows calculated.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL2'), 1,
"The correlation for SYMBOL1:SYMBOL2 should be 1.")
self.assertTrue(cor.get_base_coefficient('SYMBOL1', 'SYMBOL4') < 1,
"The correlation for SYMBOL1:SYMBOL4 should be <1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL5'), -1,
"The correlation for SYMBOL1:SYMBOL5 should be -1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL2', 'SYMBOL5'), -1,
"The correlation for SYMBOL2:SYMBOL5 should be -1.")
# Monitoring threshold is 1 and we are monitoring inverse. Get filtered correlations. There should be 3 (S1:S2,
# S1:S5 and S2:S5)
self.assertEqual(len(cor.filtered_coefficient_data.index), 3,
"There should be 3 rows in filtered coefficient data when we are monitoring inverse "
"correlations.")
        # Now we aren't monitoring inverse correlations. There should only be one correlation when filtered
cor.monitor_inverse = False
self.assertEqual(len(cor.filtered_coefficient_data.index), 1,
"There should be only 1 rows in filtered coefficient data when we are not monitoring inverse "
"correlations.")
        # Now we're going to recalculate, but this time SYMBOL1:SYMBOL2 will have non-overlapping dates and coefficient
# should be None. There shouldn't be a row. We should have correlations for S1:S4, S1:S5 and S4:S5
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_different_dates,
self.mock_correlated_prices, self.mock_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
self.assertEqual(len(cor.coefficient_data.index), 3, "There should be three correlations rows calculated.")
self.assertEqual(cor.coefficient_data.iloc[0, 2], 1, "The correlation for SYMBOL1:SYMBOL4 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[1, 2], 1, "The correlation for SYMBOL1:SYMBOL5 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[2, 2], 1, "The correlation for SYMBOL4:SYMBOL5 should be 1.")
# Get the price data used to calculate the coefficients for symbol 1. It should match mock_base_prices.
price_data = cor.get_price_data('SYMBOL1')
self.assertTrue(price_data.equals(self.mock_base_prices), "Price data returned post calculation should match "
"mock price data.")
def test_calculate_coefficient(self):
"""
Tests the coefficient calculation.
:return:
"""
# Correlation class
cor = correlation.Correlation()
# Test 2 correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_prices)
self.assertEqual(coefficient, 1, "Coefficient should be 1.")
# Test 2 uncorrelated sets. Set p value to 1 to force correlation to be returned.
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_uncorrelated_prices, max_p_value=1)
self.assertTrue(coefficient < 1, "Coefficient should be < 1.")
        # Test 2 sets where prices don't overlap
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_different_dates)
self.assertTrue(coefficient < 1, "Coefficient should be None.")
# Test 2 inversely correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_inverse_correlated_prices)
self.assertEqual(coefficient, -1, "Coefficient should be -1.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_get_ticks(self, mock):
"""
Test that caching works. For the purpose of this test, we can use price data rather than tick data.
        Mock 2 different sets of prices. Get three times: base, one within the cache threshold and one outside. Set 1
should match set 2 but differ from set 3.
:param mock:
:return:
"""
# Correlation class to test
cor = correlation.Correlation()
# Mock the tick data to contain 2 different sets. Then get twice. They should match as the data was cached.
mock.copy_ticks_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices]
# We need to start and stop the monitor as this will set the cache time
cor.start_monitor(interval=10, calculation_params={'from': 10, 'min_prices': 0, 'max_set_size_diff_pct': 0,
'overlap_pct': 0, 'max_p_value': 1}, cache_time=3)
cor.stop_monitor()
# Get the ticks within cache time and check that they match
base_ticks = cor.get_ticks('SYMBOL1', None, None)
cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(base_ticks.equals(cached_ticks),
"Both sets of tick data should match as set 2 came from cache.")
# Wait 3 seconds
time.sleep(3)
# Retrieve again. This one should be different as the cache has expired.
non_cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(not base_ticks.equals(non_cached_ticks),
"Both sets of tick data should differ as cached data had expired.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_start_monitor(self, mock):
"""
Test that starting the monitor and running for 2 seconds produces two sets of coefficient history when using an
interval of 1 second.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Create correlation class. We will set a divergence threshold so that we can test status.
cor = correlation.Correlation(divergence_threshold=0.8, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
        # We don't have a SYMBOL3 as this is set as not visible. All pairs should be correlated for the purpose of this
# test.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_correlated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# We will build some tick data for each symbol and patch it in. Tick data will be from 10 seconds ago to now.
# We only need to patch in one set of tick data for each symbol as it will be cached.
columns = ['time', 'ask']
starttime = datetime.now() - timedelta(seconds=10)
tick_data_s1 = pd.DataFrame(columns=columns)
tick_data_s2 = pd.DataFrame(columns=columns)
tick_data_s4 = pd.DataFrame(columns=columns)
tick_data_s5 = pd.DataFrame(columns=columns)
now = datetime.now()
price_base = 1
while starttime < now:
tick_data_s1 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.5]]))
tick_data_s2 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.1]]))
tick_data_s4 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.25]]))
tick_data_s5 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * -0.25]]))
starttime = starttime + timedelta(milliseconds=10*random.randint(0, 100))
price_base += 1
# Patch it in
mock.copy_ticks_range.side_effect = [tick_data_s1, tick_data_s2, tick_data_s4, tick_data_s5]
        # Start the monitor. Run every second. Use ~10 and ~5 seconds of data. We're not testing the overlap and price
# data quality metrics here as that is set elsewhere so these can be set to not take effect. Set cache level
# high and don't use autosave. Timer runs in a separate thread so test can continue after it has started.
cor.start_monitor(interval=1, calculation_params=[{'from': 0.66, 'min_prices': 0,
'max_set_size_diff_pct': 0, 'overlap_pct': 0,
'max_p_value': 1},
{'from': 0.33, 'min_prices': 0,
'max_set_size_diff_pct': 0, 'overlap_pct': 0,
'max_p_value': 1}], cache_time=100, autosave=False)
# Wait 2 seconds so timer runs twice
time.sleep(2)
# Stop the monitor
cor.stop_monitor()
# We should have 2 coefficients calculated for each symbol pair (6), for each date_from value (2),
# for each run (2) so 24 in total.
self.assertEqual(len(cor.coefficient_history.index), 24)
# We should have 2 coefficients calculated for a single symbol pair and timeframe
self.assertEqual(len(cor.get_coefficient_history({'Symbol 1': 'SYMBOL1', 'Symbol 2': 'SYMBOL2',
'Timeframe': 0.66})),
2, "We should have 2 history records for SYMBOL1:SYMBOL2 using the 0.66 min timeframe.")
# The status should be DIVERGED for SYMBOL1:SYMBOL2 and CORRELATED for SYMBOL1:SYMBOL4 and SYMBOL2:SYMBOL4.
self.assertTrue(cor.get_last_status('SYMBOL1', 'SYMBOL2') == correlation.STATUS_DIVERGED)
self.assertTrue(cor.get_last_status('SYMBOL1', 'SYMBOL4') == correlation.STATUS_CORRELATED)
self.assertTrue(cor.get_last_status('SYMBOL2', 'SYMBOL4') == correlation.STATUS_CORRELATED)
# We are monitoring inverse correlations, status for SYMBOL1:SYMBOL5 should be DIVERGED
self.assertTrue(cor.get_last_status('SYMBOL2', 'SYMBOL5') == correlation.STATUS_DIVERGED)
@patch('mt5_correlation.mt5.MetaTrader5')
def test_load_and_save(self, mock):
"""Calculate and run monitor for a few seconds. Store the data. Save it, load it then compare against stored
data."""
# Correlation class
cor = correlation.Correlation()
# Patch symbol and price data, then calculate
mock.symbols_get.return_value = self.mock_symbols
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_correlated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# Patch the tick data
columns = ['time', 'ask']
starttime = datetime.now() - timedelta(seconds=10)
tick_data_s1 = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import numpy as np
import pandas as pd
def constantsSetting():
output = {}
output["General"] = general() # loading dictionary with general, physical constants
output["Steam"] = steamProperties() # loading dictionary with steam properties constants
output["MainEngines"] = mainEngines(output) # loading dictionary with main-engine related constants
output["AuxEngines"] = auxiliaryEngines(output) # loading dictionary with auxiliary-engine related constants
output["OtherUnits"] = otherUnits(output)
return output
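# Illustrative usage sketch: constantsSetting() returns a nested dictionary, so individual
# constants are read by indexing section then key. The keys used here are taken from the
# definitions below; this helper is not part of the original module.
def _example_constants_usage():
    CONSTANTS = constantsSetting()
    r_air = CONSTANTS["General"]["R_AIR"]        # air gas constant, in [J/kg*K]
    me_mcr = CONSTANTS["MainEngines"]["MCR"]     # main engine maximum power, in [kW]
    return r_air, me_mcr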
def general():
# GENERAL CONSTANTS
output = {}
output["R_0"] = 8.3140 # Ideal gas constant, in [J/mol*K]
output["R_AIR"] = output["R_0"] * 1000 / 29 # Air gas constant, in [J/kg*K]
output["K_AIR"] = 1.4 # Air specific heat ratio
output["CP_AIR"] = 1.015 # Air specific heat, in [kJ/kgK]
output["CP_EG"] = 1.11 # EG specific heat, in [kJ/kgK]
output["CP_LO"] = 2.1 # Lubricating oil specific heat, in [kW/kgK]
output["CP_WATER"] = 4.187 # Water specifi heat, in [kW/kgK]
output["RHO_W"] = 1000.0 # Water density, in [kg/m^3]
output["RHO_LO"] = 850.0 # Lubricating oil density, in [kg/m^3]
# output["RHO_HFO"] = np.mean([890, 919, 924, 926, 925, 921, 924, 918, 920, 919, 933]) # HFO density, in [kg/m^3]
output["AIR_STOIC"] = 14.7 # Stoichiometric ratio fuel/air for Diesel-type fuels
output["ETA_VOL"] = 0.97 # Assumption about volumetric efficiency
output["P_ATM"] = 101325 # Assumption on atmospheric pressure
output["T_STANDARD"] = 323 # Standard temperature, for the calculation of the exergy in the exhaust gas
output["ISO"] = {"LHV": 42700, "T_CA": 298, "T_LT": 298, "ETA_MECH": 0.8} # Reference values for ISO conditions
output["NAMES"] = {"MainEngines": {"ME1", "ME2", "ME3", "ME4"}, "AuxEngines": {"AE1", "AE2", "AE3", "AE4"}}
output["PROPERTY_LIST"] = {}
output["PROPERTY_LIST"]["CPF"] = {"mdot", "T", "p", "h", "b", "Edot", "Bdot"} # Compressible physical flow
output["PROPERTY_LIST"]["IPF"] = {"mdot", "T", "h", "b", "Edot", "Bdot"} # Incompressible physical flow
output["PROPERTY_LIST"]["SF"] = {"mdot", "h", "b", "Edot", "Bdot"} # Incompressible physical flow
output["PROPERTY_LIST"]["Qdot"] = {"T", "Edot", "Bdot"} # Heat flow
output["PROPERTY_LIST"]["Wdot"] = {"omega", "Edot", "Bdot"} # Mechanical rotational energy flow
output["PROPERTY_LIST"]["Che"] = {"Edot", "Bdot"} # Chemical energy flows
output["PROPERTY_LIST"]["Ele"] = {"Edot", "Bdot"} # Electrical energy flows
output["PROPERTY_LIST"]["REF"] = {"Wdot": 0, "omega":0, "mdot":0, "T": 273, "p": output["P_ATM"], "h": 0, "b": 0, "Edot": 0, "Bdot": 0, "Qdot": 0}
output["EFFICIENCY_LIST"] = {}
output["EFFICIENCY_LIST"]["STANDARD"] = {"eta", "eps", "lambda", "delta"}
output["EFFICIENCY_LIST"]["HEX"] = {"eps", "lambda", "delta"}
output["EFFICIENCY_LIST"]["MIXMERGE"] = {"lambda", "delta"}
output["UNIT_TYPES"] = {}
output["UNIT_TYPES"]["STANDARD"] = {"cyl", "comp", "turbine", "ag", "boiler", "shaft"}
output["UNIT_TYPES"]["MIXMERGE"] = {"merge", "split", "distribution", "collector", "valve", "Distribution", "HotWell"}
# output["FLUIDS"] = {"BP": "Air", "Air": "Air", "Water": "Water"}
output["MDO"] = {"LHV": 42230.0, "CP": 1.8, "C": 0.87, "H": 0.13}
output["MDO"]["HHV"] = output["MDO"]["LHV"] * (1.0406 + 0.0144 * output["MDO"]["H"] / output["MDO"]["C"] * 12) * 1.01 # Calculated Higher heating value
output["MDO"]["specificExergy"] = output["MDO"]["LHV"] * (1.0401 + 0.1728 * output["MDO"]["H"] / output["MDO"]["C"])
output["HFO"] = {"LHV": 40360.0, "CP": 1.8, "C": 0.89, "H": 0.11}
output["HFO"]["HHV"] = output["HFO"]["LHV"] * (1.0406 + 0.0144 * output["HFO"]["H"] / output["HFO"]["C"] * 12) * 1.01 # Calculated Higher heating value
output["HFO"]["specificExergy"] = output["HFO"]["LHV"] * (1.0401 + 0.1728 * output["HFO"]["H"] / output["HFO"]["C"])
output["NASA_POLY"] = {"N2": [0.03298677E+02, 0.14082404E-02, -0.03963222E-04, 0.05641515E-07, -0.02444854E-10, -0.10208999E+04, 0.03950372E+02],
"O2": [3.78245636E+00, -2.99673416E-03, 9.84730201E-06, -9.68129509E-09, 3.24372837E-12, -1.06394356E+03, 3.65767573E+00],
"CO2": [2.35677352E+00, 8.98459677E-03, -7.12356269E-06, 2.45919022E-09, -1.43699548E-13, -4.83719697E+04, 9.90105222E+00],
"H2O": [4.19864056E+00, -2.03643410E-03, 6.52040211E-06, -5.48797062E-09, 1.77197817E-12, -3.02937267E+04, -8.49032208E-01]}
output["MOLAR_MASSES"] = {"N2": 28, "O2": 32, "CO2": 44, "H2O": 18}
return output
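# Illustrative only: assuming the NASA_POLY entries above follow the standard 7-coefficient
# NASA polynomial form, cp/R = a1 + a2*T + a3*T^2 + a4*T^3 + a5*T^4 (a6 and a7 are the
# enthalpy and entropy integration constants). This helper is a sketch for reference,
# not part of the original model.
def _example_cp_from_nasa_poly(species="N2", T=400.0):
    g = general()
    a = g["NASA_POLY"][species]
    R_specific = g["R_0"] / g["MOLAR_MASSES"][species] * 1000.0   # [J/kg*K]
    cp_over_R = a[0] + a[1]*T + a[2]*T**2 + a[3]*T**3 + a[4]*T**4
    return cp_over_R * R_specific                                  # cp in [J/kg*K]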
def steamProperties():
# STEAM PROPERTIES
output = {}
output["H_STEAM_LS"] = 662.0 # Specific enthalpy of 6 bar steam, saturated liquid, in [kJ/kg]
output["H_STEAM_VS"] = 2754.0 # Specific enthalpy of 6 bar steam, saturated vapour, in [kJ/kg]
output["S_STEAM_LS"] = 1.9108 # Specific entropy of 6 bar steam, saturated liquid, in [kJ/kg]
output["S_STEAM_VS"] = 6.7766 # Specific entrpy of 6 bar steam, saturated vapour, in [kJ/kg]
output["DH_STEAM"] = output["H_STEAM_VS"] - output["H_STEAM_LS"]
output["DS_STEAM"] = output["S_STEAM_VS"] - output["S_STEAM_LS"]
output["TSAT_STEAM"] = 430.0 # Saturation temperature chosen for the selected pressure, in [kJ/kg]
return output
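# Illustrative only: a typical use of the saturated-steam constants above is sizing a steam
# mass flow from a heat demand via the latent heat DH_STEAM (kW / (kJ/kg) = kg/s). The
# demand value below is hypothetical.
def _example_steam_mass_flow(Qdot_kW=500.0):
    steam = steamProperties()
    return Qdot_kW / steam["DH_STEAM"]   # [kg/s] of 6 bar saturated steam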
def mainEngines(CONSTANTS):
output = {"MCR": 5890} # Main engines maximum power, in [kW]
output["RPM_DES"] = 500 # Main engine design speed, in [rpm]
output["RPM_TC_DES"] = 22000 # Main engine Turbocharger design speed, in [rpm]
output["MFR_FUEL_DES_ISO"] = 184 * output["MCR"] / 1000 / 3600 # Fuel flow at 100# load at ISO conditions, in [kg/s]. 184 is the ISO bsfc at 100% load (average for the 4 MEs)
# output["POLY_FUEL_RACK_2_MFR_FUEL"] = polyfit([24 31 38 42 46]/46, [336.3 587.8 836.6 953.1 1141]/3600, 2) # Fits a 2nd degree polynomial relating relative fuel rack position to fuel flow in kg/s
# Here we write all the required info for the fuel rack position to mass flow rate relation
output["FRP_2_MFR"] = {}
output["FRP_2_MFR"]["POLY"] = {"ME1": [-159.612, 24.23254], "ME2": [-159.612, 28.282788], "ME3": [-159.612, 28.282788], "ME4": [-159.612, 28.282788]}
output["FRP_2_MFR"]["FRP_MIN"] = {"ME1": 18.0, "ME2": 16.6, "ME3": 17.5, "ME4": 16.4}
output["FRP_2_MFR"]["FRP_MAX"] = {"ME1": 51, "ME2": 47, "ME3": 47, "ME4": 46}
# output["POLY_FUEL_LOAD_2_BSFC_ISO"] = np.polyfit(np.array([336.3, 587.8, 836.6, 953.1, 1141])/1141, [216.9, 187.1, 178.5, 179.2, 181.4], 2) # Fits a 2nd degree polynomial relating relative fuel rack position to fuel flow in kg/s
output["POLY_FUEL_LOAD_2_BSFC_ISO"] = np.polyfit(np.array([336.3, 587.8, 836.6, 953.1, 1141]) / 1141,
                                                 [205, 187.1, 178.5, 179.2, 181.4], 2) # Fits a 2nd degree polynomial relating relative fuel flow to ISO bsfc in [g/kWh]
output["POLY_RPM_2_ISO_BSFC"] = np.polyfit([315.0, 397.0, 454.0, 474.0, 500.0, 516.0], [np.mean([216.1, 207.6, 225.5, 209.9]), 188.2, 179.7, 181.6, 185, 191.1], 2)
output["POLY_LOAD_2_ISO_BSFC"] = np.polyfit([0.25, 0.5, 0.75, 0.85, 1.0, 1.1], [np.mean([216.1, 207.6, 225.5, 209.9]), 188.2, 179.7, 181.6, 185, 191.1], 2)
output["QDOT_HT_DES"] = 1650.0 # Heat flow to the HT cooling systems at design load, in [kW]
output["QDOT_LT_DES"] = 1450.0 # Heat flow to the HT cooling systems at design load, in [kW]
# output["POLY_LOAD_2_QDOT_HT"] = np.array([0.7826 , 0.2204 , 0])
output["POLY_LOAD_2_QDOT_HT"] = np.array([0.5826, 0.5204, 0])
output["POLY_LOAD_2_QDOT_LT"] = np.array([-0.1206 , 1.0978 , 0])
output["POLY_H_2_QDOT"] = np.array([-3.65E-4, +3.17E-2, 2.85E1])
output["BSFC_ISO_DES"] = np.polyval(output["POLY_LOAD_2_ISO_BSFC"], 1)
# Function handle that allows to calculate the fuel load
output["BORE"] = 0.46 # Main engine bore
output["STROKE"] = 0.58 # Main engine stroke
output["N_CYL"] = 6 # Number of cylinders
output["R_C"] = 14 # Assumption of compression ratio
output["V_SW"] = output["BORE"]**2 / 4 * np.pi * output["STROKE"] # Swept volume, in [m^3]
output["V_MAX"] = output["V_SW"] * output["R_C"] / (output["R_C"] - 1) # Maximum volume, in [m^3]
output["MFR_LO"] = 120.0 * CONSTANTS["General"]["RHO_LO"] / 3600.0 # Mass flow rate of oil in each main engine, in [kg/s]
output["MFR_LT"] = 120.0 * CONSTANTS["General"]["RHO_W"] / 3600.0 # Mass flow rate of LT cooling water, in [kg/s]
output["MFR_HT"] = 120.0 * CONSTANTS["General"]["RHO_W"] / 3600.0 # Mass flow rate of LT cooling water, in [kg/s]
output["POLY_PIN_2_ETA_IS"] = [-1.18e-2, 8.74e-2, 6.81e-1] # Polynoimial regression for isentropic efficiency of the compressor
output["ETA_CORR"] = 1.05
output["ETA_MECH_TC"] = 0.9 # Mechanical efficiency of the turbocharger [-]. Variations from "Development and validation of a new turbocharger simulation methodology for marine two stroke diesel engine modelling and diagnostic applications"
output["POLY_TC_RPM_2_ETA_MECH"] = np.polyfit(np.array([6000, 9500, 13000, 16500, 20000])/output["RPM_TC_DES"], [0.7 , 0.76 , 0.84 , 0.91 , 0.91], 2)
output["EPS_CAC_HTSTAGE"] = 0.7 # Effectiveness, as defined by the epsNTU method, of the High Temperature stage of the Charge Air Cooler, in [-]
output["ETA_GB"] = 0.985 # Mechanical efficiency of the gearbox
output["ETA_SHAFT"] = 0.99 # Mechanical efficiency of the engine shaft
output["FRP_DES"] = {"ME1": 51, "ME2": 47, "ME3": 47, "ME4": 46} # Value of the fuel rack position at 100% load
# output["BYPASS_FLOW"] = 1.1
output["STATIC_HEAD"] = 19
output["T_COOLING_MIX"] = 71 + 273.15 # Temperature of the LT/HT mix before the mixing with the HT cooling systems
output["AIR_FLOW_MULT"] = 0.95
return output
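# Illustrative only: the load-to-bsfc coefficients above come from np.polyfit, so they are
# evaluated with np.polyval. The g/kWh -> kg/s conversion below is plain unit arithmetic
# and is not taken from the original code.
def _example_me_fuel_flow(load=0.75):
    me = constantsSetting()["MainEngines"]
    bsfc = np.polyval(me["POLY_LOAD_2_ISO_BSFC"], load)   # ISO bsfc, in [g/kWh]
    return bsfc * load * me["MCR"] / 3.6e6                # fuel mass flow, in [kg/s]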
def auxiliaryEngines(CONSTANTS):
output = {"MCR": 2760.0} # Auxiliary engines maximum power, in [kW]
output["RPM_DES"] = 750.0 # Auxiliary engines design speed, in [rpm]
output["RPM_TC_DES"] = 26000 # Auxiliary engines design speed, in [rpm]
# AE_POLY_FUEL_RACK_2_MFR_FUEL = polyfit([17 27 37 44.5 46]/46, [336.3 587.8 836.6 953.1 1141]/3600, 2) # Fits a 2nd degree polynomial relating relative fuel rack position to fuel flow in kg/s
output["POLY_LOAD_2_ISO_BSFC"] = np.polyfit(np.array([0.5, 0.75, 0.85, 1.0]), np.array([193.0, 182.0, 181.0, 184.0])/184.0*190.0, 2) # Fits a 2nd degree polynomial relating relative fuel rack position to fuel flow in kg/s
output["POLY_PIN_2_ETA_IS"] = np.array([-1.18e-2, 8.74e-2, 6.81e-1])
output["BORE"] = 0.32 # Main engine bore, in [m]
output["STROKE"] = 0.40 # Main engine stroke, in [m]
output["N_CYL"] = 6.0 # Number of cylinders
output["V_SW"] = output["BORE"]**2 / 4.0 * np.pi * output["STROKE"] # Swept volume, in [m^3]
output["R_C"] = 14.0 # Assumption of compression ratio
output["V_MAX"] = output["V_SW"] * output["R_C"] / (output["R_C"] - 1) # Maximum volume, in [m^3]
output["MFR_LO"] = 70 * CONSTANTS["General"]["RHO_LO"] / 3600 # Mass flow rate of oil in each auxiliary engine, in [kg/s]
output["QDOT_2_CAC_HT_DES"] = 351.0 # Heat flow to the charge air cooler, High temperature stage, at the engine design point, in [kW]
output["QDOT_2_CAC_LT_DES"] = 433.0 # Heat flow to the charge air cooler, Low temperature stage, at the engine design point, in [kW]
output["QDOT_2_JWC_DES"] = 414.0 # Heat flow to the jacket water cooler at the engine design point, in [kW]
output["QDOT_2_LOC_DES"] = 331.0 # Heat flow to the lubricating oil cooler at the engine design point, in [kW]
output["QDOT_HT_DES"] = output["QDOT_2_CAC_HT_DES"] + output["QDOT_2_JWC_DES"]
output["QDOT_LT_DES"] = output["QDOT_2_CAC_LT_DES"] + output["QDOT_2_LOC_DES"]
# Assuming that the amount of heat from the engine to the HT cooling systems behaves in the same way as that of the main engines.
output["POLY_LOAD_2_QDOT_HT"] = CONSTANTS["MainEngines"]["POLY_LOAD_2_QDOT_HT"]
output["POLY_LOAD_2_QDOT_LT"] = CONSTANTS["MainEngines"]["POLY_LOAD_2_QDOT_LT"]
output["POLY_H_2_QDOT"] = np.array([-1.74E-3, -1.36E-2, 3.48E1])
    # Assuming that the share of the charge air cooling heat going to the HT stage is linearly increasing from 0 to its value at the engine design point.
output["POLY_LOAD_2_SHARE_CAC"] = np.polyfit([0, 1], [0, output["QDOT_2_CAC_HT_DES"]/(output["QDOT_2_CAC_HT_DES"]+output["QDOT_2_CAC_LT_DES"])], 1)
output["MFR_LT"] = 60.0 * CONSTANTS["General"]["RHO_W"] / 3600.0 # Mass flow rate of LT cooling water, in [kg/s]
output["MFR_HT"] = 60.0 * CONSTANTS["General"]["RHO_W"] / 3600.0 # Mass flow rate of HT cooling water, in [kg/s]
output["ETA_CORR"] = 1.15 # Used because one of the engines need correction, to be checked
# The efficiency is calculated at eta = eta_des - A exp(-k (x-x_ref)/x_ref)
output["AG"] = {}
output["AG"]["ETA_DES"] = 0.97
output["AG"]["A"] = 0.18
output["AG"]["k"] = 5
output["EPS_CAC_HTSTAGE"] = 0.85 # Effectiveness, as defined by the epsNTU method, of the High Temperature stage of the Charge Air Cooler, in [-]
# output["STATIC_HEAD"] = 15 # Now it is calculated instead
output["T_COOLING_MIX"] = 71 + 273.15 # Temperature of the LT/HT mix before the mixing with the HT cooling systems
return output
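# Illustrative only: the alternator efficiency relation quoted in the comment above,
# eta = eta_des - A * exp(-k * (x - x_ref) / x_ref), written out as a function. Reading
# x and x_ref as a load and a reference load is an assumption of this sketch, not taken
# from the original code.
def _example_ag_efficiency(x, x_ref=1.0):
    ag = constantsSetting()["AuxEngines"]["AG"]
    return ag["ETA_DES"] - ag["A"] * np.exp(-ag["k"] * (x - x_ref) / x_ref)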
def otherUnits(CONSTANTS):
output = {} # Initializing the output dictionary
# Providing the optimal optimization vector
# param = [0.00000000e+00, 0.00000000e+00, 3.02453747e-01, 6.10256462e-01, 1.00000000e+00, 7.37813650e-01,
# 6.13233358e-01, 3.44252126e+02, 5.61738566e-01, 7.19654306e+06, 4.16518191e+03]
param = [2.04820342e+01, 0.00000000e+00, 3.97004436e-01, 7.22620258e-01, 1.00000000e+00, 6.81708153e-01,
2.14566717e-01, 3.47857597e+02, 5.00000000e-01, 9.54336804e+06, 5.13466015e+03]
Qdot_HTHR_constant = param[0]
Qdot_steam_constant = param[1]
HVACreheaterCoefficient = param[2]
HotWaterHeaterCoefficient = param[3]
HVACpreheaterCoefficient = param[4]
GalleyCoefficient = param[5]
OtherCoefficient = param[6]
HTHRinletTemperature = param[7]
HTHRhexEffectiveness = param[8]
BoilerStorageSize = param[9]
BoilerReferencePower = param[10]
# Boiler
output["BOILER"] = {} # Initializing the boiler sub-dictionary
output["BOILER"]["ETA_DES"] = 0.8
output["BOILER"]["ETA_REGR_X"] = [6.79E-02, 1.20E-01, 1.62E-01, 2.12E-01, 2.86E-01, 3.52E-01, 4.03E-01, 4.41E-01, 4.90E-01, 5.40E-01, 5.89E-01, 6.54E-01, 7.16E-01, 7.67E-01, 8.31E-01, 8.94E-01, 9.47E-01, 9.89E-01, 1.04E+00, 1.09E+00, 1.14E+00, 1.20E+00]
output["BOILER"]["ETA_REGR_Y"] = [0.8787, 0.8830, 0.8864, 0.8889, 0.8910, 0.8897, 0.8870, 0.8842, 0.8810, 0.8777, 0.8740, 0.8692, 0.86486, 0.8613, 0.8570, 0.8528, 0.8491, 0.8462, 0.8427, 0.8390, 0.8356, 0.8317]
output["BOILER"]["OXYGEN_EG"] = 0.04
# Here we calculate the air/fuel ratio of the boiler, given the concentration of oxygen in the exhaust
output["BOILER"]["LAMBDA"] = (CONSTANTS["General"]["MDO"]["C"] / 12 + CONSTANTS["General"]["MDO"]["H"] / 4 * (1 + output["BOILER"]["OXYGEN_EG"])
) / (1 - 100/21 * output["BOILER"]["OXYGEN_EG"]) * (32 + 100 / 21 * 28)
for idx in range(len(output["BOILER"]["ETA_REGR_Y"])):
output["BOILER"]["ETA_REGR_Y"][idx] = output["BOILER"]["ETA_REGR_Y"][idx] / max(output["BOILER"]["ETA_REGR_Y"])
output["BOILER"]["STORAGE_SIZE"] = BoilerStorageSize
output["BOILER"]["REFERENCE_POWER"] = BoilerReferencePower
# Propulsion train
output["PROPULSION"] = {}
output["PROPULSION"]["ETA_GB"] = 0.98
output["PROPULSION"]["ETA_SH"] = 0.99
# Heat demand
output["HEAT_DEMAND"] = {}
output["HEAT_DEMAND"]["DESIGN"] = {}
output["HEAT_DEMAND"]["DESIGN"]["HOT_WATER_HEATER"] = 1200
output["HEAT_DEMAND"]["DESIGN"]["HVAC_PREHEATER"] = 3500
output["HEAT_DEMAND"]["DESIGN"]["HVAC_REHEATER"] = 1780
output["HEAT_DEMAND"]["DESIGN"]["TANK_HEATING"] = 210
output["HEAT_DEMAND"]["DESIGN"]["OTHER_TANKS"] = 140
output["HEAT_DEMAND"]["DESIGN"]["HFO_TANK_HEATING"] = 270
output["HEAT_DEMAND"]["DESIGN"]["MACHINERY_SPACE_HEATERS"] = 280
output["HEAT_DEMAND"]["DESIGN"]["GALLEY"] = 600
output["HEAT_DEMAND"]["DESIGN"]["OTHER_HTHR"] = Qdot_HTHR_constant
output["HEAT_DEMAND"]["DESIGN"]["OTHER_STEAM"] = Qdot_steam_constant
output["HEAT_DEMAND"]["LIN"] = {}
output["HEAT_DEMAND"]["LIN"]["HOT_WATER_HEATER"] = HotWaterHeaterCoefficient
output["HEAT_DEMAND"]["LIN"]["HVAC_PREHEATER"] = HVACpreheaterCoefficient
output["HEAT_DEMAND"]["LIN"]["HVAC_REHEATER"] = HVACreheaterCoefficient
output["HEAT_DEMAND"]["LIN"]["OTHER_CONSUMERS"] = OtherCoefficient
output["HEAT_DEMAND"]["LIN"]["GALLEY"] = GalleyCoefficient
output["HEAT_DEMAND"]["HWH_HOURLY"] = [0.10, 0.10, 0.10, 0.10, 0.70, 1.00, 1.00, 1.00, 0.50, 0.50, 0.50, 0.50,
0.50, 0.50, 0.50, 0.50, 0.70, 0.70, 1.00, 1.00, 1.00, 0.50, 0.50, 0.30]
output["HEAT_DEMAND"]["GALLEY_HOURLY"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 1.0, 1.0, 1.0, 0.5, 1.0,
1.0, 0.5, 0.1, 0.1, 0.5, 1.0, 1.0, 1.0, 1.0, 0.5, 0.1, 0.1]
output["HEAT_DEMAND"]["T_INSIDE"] = 22 + 273.15
output["HEAT_DEMAND"]["WINTER_START"] = np.datetime64("2013-12-03")
output["HEAT_DEMAND"]["WINTER_END"] = np.datetime64("2014-04-14")
output["HEAT_DEMAND"]["SPRING_START"] = np.datetime64("2014-04-15")
output["HEAT_DEMAND"]["SPRING_END"] = np.datetime64("2014-07-02")
output["HEAT_DEMAND"]["SUMMER_START"] = np.datetime64("2014-07-03")
output["HEAT_DEMAND"]["SUMMER_END"] = np.datetime64("2014-08-21")
output["HEAT_DEMAND"]["AUTUMN_START"] = np.datetime64("2014-08-22")
output["HEAT_DEMAND"]["AUTUMN_END"] = np.datetime64("2014-12-02")
output["HEAT_DEMAND"]["TOTAL_AREA"] = (150 + 30) * 2 * 20 + 2 * 150 * 30
output["HEAT_DEMAND"]["T_AIR_REF_MIN"] = -20 + 273
output["HEAT_DEMAND"]["T_AIR_REF_MAX"] = 15 + 273
output["HEAT_DEMAND"]["TOTAL_U"] = output["HEAT_DEMAND"]["DESIGN"]["HVAC_PREHEATER"] / output["HEAT_DEMAND"]["TOTAL_AREA"] / (output["HEAT_DEMAND"]["T_INSIDE"] - output["HEAT_DEMAND"]["T_AIR_REF_MIN"])
output["HEAT_DEMAND"]["HTHR_EPS"] = HTHRhexEffectiveness # Effectiveness of the HTHR water-water heat exchangers. Old: 0.85
output["HEAT_DEMAND"]["HTHR_INLET_TEMPERATURE"] = HTHRinletTemperature
output["HEAT_DEMAND"]["HVAC_POWER_DES"] = 1500 # Design power of the HVAC compressors
return output
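# Illustrative only: TOTAL_U above is back-calculated so that the design HVAC preheater
# demand is recovered at the minimum reference air temperature, so a simple
# U*A*(T_inside - T_air) estimate is consistent with those constants. Clamping the demand
# to zero above T_AIR_REF_MAX is an assumption of this sketch.
def _example_hvac_preheater_demand(T_air, CONSTANTS):
    hd = CONSTANTS["OtherUnits"]["HEAT_DEMAND"]
    if T_air >= hd["T_AIR_REF_MAX"]:
        return 0.0
    return hd["TOTAL_U"] * hd["TOTAL_AREA"] * (hd["T_INSIDE"] - T_air)   # [kW]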
def seasons(dataset_raw, processed, CONSTANTS):
# This function is used to define some time-related features, such as the season, the weekends, etc.
# Definition of the weekend: all data points are 1 if it's weekend, 0 if it is a weekday.
temp = pd.Series(index = processed.index)
temp[processed.index.dayofweek == 4] = 1 # Fridays
temp[processed.index.dayofweek == 5] = 1 # Saturdays
temp[processed.index.dayofweek == 6] = 1 # Sundays
temp[temp.isnull()] = 0 # All other weekdays
temp = temp.shift(16*4-1) # Shifting to account for the fact that passengers are going on and off at 16
temp[temp.isnull()] = 0
processed["Weekends"] = temp
# Definition of the seasons based on energy demand
# 0 = winter, 1 = spring/fall, 2 = summer
temp = pd.Series(index=processed.index)
temp[CONSTANTS["OtherUnits"]["HEAT_DEMAND"]["WINTER_START"]:CONSTANTS["OtherUnits"]["HEAT_DEMAND"]["WINTER_END"]] = 0
temp[CONSTANTS["OtherUnits"]["HEAT_DEMAND"]["SPRING_START"]:CONSTANTS["OtherUnits"]["HEAT_DEMAND"]["SPRING_END"]] = 1
temp[CONSTANTS["OtherUnits"]["HEAT_DEMAND"]["SUMMER_START"]:CONSTANTS["OtherUnits"]["HEAT_DEMAND"]["SUMMER_END"]] = 2
temp[CONSTANTS["OtherUnits"]["HEAT_DEMAND"]["AUTUMN_START"]:CONSTANTS["OtherUnits"]["HEAT_DEMAND"]["AUTUMN_END"]] = 1
processed["Seasons"] = temp
# Definition of the summer based on the number of passengers
temp = | pd.Series(index=processed.index) | pandas.Series |
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import os,sys,math
import tempfile
import tarfile
import json
import pprint
#import custom_config as cc
import atomsci.ddm.utils.pubchem_utils as pu
from os import path
from target_data_curation import AMPLDataset
from atomsci.ddm.utils import struct_utils as su
from atomsci.ddm.utils import data_curation_functions as dcf
from atomsci.ddm.utils import curate_data
import atomsci.ddm.pipeline.diversity_plots as dp
import atomsci.ddm.pipeline.chem_diversity as cd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
from tqdm import tqdm
# Using data_curation_functions
# Initial dataset downloaded from https://zenodo.org/record/173258#.XcXWHZJKhhE
# cd /usr/workspace/atom/excapedb/
# Look for Last-Modified and Length to check whether to download an update: DATE=$(date -Idate)
#---
# wget -S https://zenodo.org/record/173258/files/pubchem.chembl.dataset4publication_inchi_smiles.tsv.xz?download=1 -O excape_data.csv.$DATE   (Last-Modified: 12/29/2016)
# head -1 pubchem.chembl.dataset4publication_inchi_smiles.tsv > cyp3a4_excape.csv
# grep 'CYP3A4' pubchem.chembl.dataset4publication_inchi_smiles.tsv >> cyp3a4_excape.csv
#---
# grep CYP3A4 pubchem.chembl.dataset4publication_inchi_smiles.tsv > raw_data.txt
# head -1 pubchem.chembl.dataset4publication_inchi_smiles.tsv > header
# cat header raw_data.txt > cyp3a4_excape.csv
#---
class ExcapeActivityDump(AMPLDataset):
"""Class responsible for parsing and extracting data from the Excape-DB
tsv data dump file
"""
def set_columns(self,sec) :
"""
        Sets expected column names for the input file.
"""
self.smiles_col = 'SMILES'
self.base_smiles_col = 'base_rdkit_smiles'
self.id_col = 'compound_id'
self.standard_col = 'Standard Type'
self.target_name_col = 'Target Name'
self.target_id_col = 'Gene_Symbol'
self.relation_col = 'relation'
self.value_col = 'activity'
self.date_col = 'Document Year'
self.data_source_name=sec
def __init__(self, parser=None, sec = None, raw_target_lst = None, dataset = None, df = None):
super().__init__()
"""
Initialize object
Params:
parser : pointer to the config file parser to access user specificied configuration settings
sec : specifies where in the config file to retrieve settings from
raw_target_lst : list of gene targets to extract data for
dataset : gives an identifier
df : dataframe storing the raw data to be extracted and curated
"""
if dataset is not None and df is not None :
self.set_columns(dataset.data_source_name)
self.df = df
else :
self.set_columns(sec)
filename = parser.check_get(sec,'activity_csv')
self.df = pd.read_csv(filename,sep="\t",engine="python",error_bad_lines=False)
#the reason to fill empty values is to track what is likely inactive measurements
#self.df['pXC50'] = self.df['pXC50'].fillna(0)
#we just ignore them for now
self.df = self.df.dropna(subset=['pXC50'])
# (Yaru) Remove inf in curated_df
self.df = self.df[~self.df.isin([np.inf]).any(1)]
# standardize column names
self.df.rename( columns={ "pXC50" : self.value_col, "Ambit_InchiKey" : self.id_col }, inplace=True)
self.df[self.relation_col] = ""
self.df.loc[self.df.Activity_Flag == 'N',self.relation_col] = '<'
self.df.loc[self.df.Activity_Flag == 'A',self.relation_col] = ''
# FILTER ON 9606 (human)
self.df=self.df[self.df.Tax_ID == 9606]
self.df = self.df[self.df[self.target_id_col].isin(raw_target_lst)]
class DrugTargetCommonsActivityDump(AMPLDataset):
"""Class responsible for parsing and extracting data from the Drug Target Commons
data dump file
"""
def set_columns(self, sec ) :
"""
        Sets expected column names for the input file.
"""
self.smiles_col = 'smiles'
self.base_smiles_col = 'base_rdkit_smiles'
self.id_col = 'compound_id'
self.standard_col = 'standard_type'
self.target_name_col = 'gene_names'
self.target_id_col = 'gene_names'
self.relation_col = 'relation'
self.value_col = 'standard_value'
self.date_col = ''
self.tmp_dir = './' # where to cache retrieved SMILES strings
self.data_source_name=sec
def __init__(self, parser=None, sec = None, raw_target_lst = None, dataset = None, df = None):
super().__init__()
"""
Initialize object
Params:
parser : pointer to the config file parser to access user specificied configuration settings
sec : specifies where in the config file to retrieve settings from
raw_target_lst : list of gene targets to extract data for
dataset : gives an identifier
df : dataframe storing the raw data to be extracted and curated
"""
if dataset is not None and df is not None :
self.set_columns(dataset.data_source_name)
self.df = df
else :
self.set_columns(sec)
self.tmp_dir = parser.check_get(sec,'output_data_dir')
filename = parser.check_get(sec,'activity_csv')
self.smiles_file=parser.check_get(sec,'smiles_csv')
self.df = pd.read_csv(filename,sep=",",engine="python",error_bad_lines=False)
end_point_lst=parser.check_get(sec,"end_points").split(',')
self.df.standard_type = self.df.standard_type.str.lower()
end_point_lst = [ x.lower() for x in end_point_lst ]
#
# might want to filter on human
# data_curatoin_functions.filter_dtc_data
#
#TODO : SHOULD FILTER ON 9606 (human) -Jonathan, why isn't this done?
self.df = self.df[self.df.gene_names.isin(raw_target_lst) &
~(self.df.standard_inchi_key.isna()) &
self.df.standard_type.isin( end_point_lst ) &
(self.df.standard_units == 'NM') &
~self.df.standard_value.isna() &
~self.df.compound_id.isna() &
(self.df.wildtype_or_mutant != 'mutated') ]
####WARNING: I had to convert this explicitly to a floating point value!!!
self.df[self.value_col]=self.df[self.value_col].astype(float)
## convert values to -log10 molar values (function assumes input is in NM units)
self.df[self.value_col]=self.df[self.value_col].apply(dcf.ic50topic50)
# (Yaru) Remove inf in curated_df
self.df = self.df[~self.df.isin([np.inf]).any(1)]
# we shouldn't have to do this, but compound_id is hard coded in the aggregate_data function
# we need to use the inchikey as the compound id, since the normal compound id isn't always defined/available
# so rename column to something different and name inchikey column as the compound_id column
self.df.rename( columns={"standard_relation" : self.relation_col }, inplace=True)
self.df.rename( columns={self.id_col : "orig_compound_id" }, inplace=True)
self.df.rename( columns={"standard_inchi_key" : self.id_col }, inplace=True)
def add_base_smiles_col(self):
"""
requires a specialized SMILES curation step as SMILES strings are stored separately
"""
targLst=self.df[self.target_name_col].unique().tolist()
targLst.sort()
targ_name='_'.join(targLst)
if len(targ_name) >= 25 :
targ_name='target'
fileNameTooLong=True
else :
fileNameTooLong=False
myList=self.df[self.id_col].unique().tolist()
#
#TODO: Need to make this caching scheme user defined
# now just hardcoded to write to current directory
# Retrieve SMILES strings for compounds through PUBCHEM web interface.
# THIS is slow so it should only be done once and then cached to file
tmp_dir=self.tmp_dir
ofile=tmp_dir+targ_name+'_dtc_smiles_raw.csv'
if not path.exists(ofile):
if not path.exists(self.smiles_file) :
print("download from PubChem ")
## smiles are stored in 'smiles' column in returned dataframe
save_smiles_df,fail_lst,discard_lst=pu.download_smiles(myList)
save_smiles_df.to_csv(ofile, index=False)
else :
print("retrieve SMILES from predownloaded file",self.smiles_file)
# save_smiles_df=pd.read_csv(self.smiles_file)
# save_smiles_df.rename( columns={"inchikey" : self.id_col }, inplace=True)
# save_smiles_df.to_csv(ofile, index=False)
sed_cmd = f"sed 's/inchikey/{self.id_col}/' {self.smiles_file} > {ofile}"
os.system(sed_cmd)
save_smiles_df=pd.read_csv(ofile)
else :
print("Already download file",ofile)
save_smiles_df=pd.read_csv(ofile)
print("debug make sure smiles not empty",save_smiles_df.shape)
#the file puts the SMILES string in quotes, which need to be removed
save_smiles_df[self.smiles_col]=save_smiles_df[self.smiles_col].str.replace('"','')
#need to join SMILES strings with main data_frame
self.df=self.df.merge(save_smiles_df,on=self.id_col,suffixes=('_'+targ_name,'_'))
# calculate base rdkit smiles and add them to the dataframe
super().add_base_smiles_col()
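# Illustrative only: dcf.ic50topic50 (used in __init__ above) is assumed to apply the usual
# nM -> pIC50 conversion, pIC50 = 9 - log10(IC50 [nM]); the stand-alone sketch below shows
# that arithmetic and is not the library implementation.
def _example_nM_to_pIC50(ic50_nM):
    return 9.0 - np.log10(ic50_nM)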
class ChEMBLActivityDump(AMPLDataset):
"""Class responsible for parsing and extracting data from a ChEMBL json
data dump file
"""
def set_columns(self,sec) :
"""
        Sets expected column names for the input file.
"""
self.smiles_col = 'smiles'
self.base_smiles_col = 'base_rdkit_smiles'
self.id_col = 'compound_id'
self.standard_col = 'standard_type'
self.target_id_col = 'gene_names'
self.relation_col = 'relation'
self.value_col = 'pAct'
self.units = 'units'
self.assay_id = 'assay_id'
self.data_source_name=sec
def __init__(self, parser=None, sec = None, raw_target_lst = None, dataset = None, df = None):
super().__init__()
"""
Initialize object
Params:
parser : pointer to the config file parser to access user specificied configuration settings
sec : specifies where in the config file to retrieve settings from
raw_target_lst : list of gene targets to extract data for
dataset : gives an identifier
df : dataframe storing the raw data to be extracted and curated
"""
if dataset is not None and df is not None :
self.set_columns(dataset.data_source_name)
self.df = df
else :
self.set_columns(sec)
mapgn=parser.check_get(sec,'target_mapping')
self.target_dict = json.load(open(mapgn))
end_point_lst=parser.check_get(sec,"end_points").split(',')
end_point_lst = [ x.lower() for x in end_point_lst ]
filename=parser.check_get(sec,'activity_csv')
dc=json.load(open(filename))
target_lst=[]
for val in raw_target_lst :
target_lst.append( self.target_dict[val] )
df_lst=[]
for kv in raw_target_lst :
tmp_df_lst=[]
for cid in dc[kv].keys() :
lst=dc[kv][cid]['pAct']
for it in range(len(lst)) :
row={ self.id_col : cid, self.value_col : dc[kv][cid]['pAct'][it], self.relation_col : dc[kv][cid]['relation'][it],
self.smiles_col : dc[kv][cid]['smiles'], self.standard_col : dc[kv][cid]['type'][it], self.units : dc[kv][cid]['units'][it], self.assay_id : dc[kv][cid]['assay_id'][it] }
tmp_df_lst.append(row)
df=pd.DataFrame(tmp_df_lst)
df[self.target_id_col] = self.target_dict[kv]
df = df.dropna(subset=[self.value_col,self.standard_col,self.id_col])
df_lst.append(df)
self.df = pd.concat(df_lst)
## do we need to do any other filter/checks here, like units?
self.df=self.df[(self.df.units.str.lower() == 'nm')]
self.df.standard_type = self.df.standard_type.str.lower()
self.df=self.df[self.df.standard_type.isin( end_point_lst ) ]
def filter_task(self, target_id):
"""when the gene target label isn't standardized, need to return the gene target mapping
"""
return self.df[(self.df[self.target_id_col] == self.target_dict[target_id])],self.target_dict[target_id]
def convert_dtype(x):
if not x:
return 0.0
try:
return float(x)
except:
return 0.0
class GPCRChEMBLActivityDump(AMPLDataset):
"""Class responsible for parsing and extracting data from a custom ChEMBL formatted csv
data dump file
"""
def set_columns(self,sec) :
"""
        Sets expected column names for the input file.
"""
## complaining about mixed datatypes and I can't seem to fix it!
self.smiles_col = 'Smiles'
self.base_smiles_col = 'base_rdkit_smiles'
self.id_col = 'compound_id'
self.standard_col = 'relation'
self.target_name_col = 'Target Name'
self.target_id_col = 'gene_names'
self.relation_col = 'relation'
self.value_col = 'pChEMBL Value'
self.date_col = 'Document Year'
self.data_source_name=sec
def __init__(self, parser=None, sec = None, raw_target_lst = None, dataset = None, df = None):
super().__init__()
"""
Initialize object
Params:
parser : pointer to the config file parser to access user specificied configuration settings
sec : specifies where in the config file to retrieve settings from
raw_target_lst : list of gene targets to extract data for
dataset : gives an identifier
df : dataframe storing the raw data to be extracted and curated
"""
if dataset is not None and df is not None :
self.set_columns(dataset.data_source_name)
self.df = df
else :
self.set_columns(sec)
my_conv={'Molecular Weight' : convert_dtype}
filename = parser.check_get(sec,'activity_csv')
self.df = pd.read_csv(filename,error_bad_lines=True, index_col=False, converters=my_conv)
mapgn=parser.check_get(sec,'target_mapping')
end_point_lst=parser.check_get(sec,"end_points").split(',')
end_point_lst = [ x.lower() for x in end_point_lst ]
##warning this assumes first column is the key and second column is the value
chk = | pd.read_csv(mapgn) | pandas.read_csv |
import os
import matplotlib.pyplot as plt, mpld3
import matplotlib.ticker as ticker
import pandas as pd
import numpy as np
from my_utils import (connect_db, exists_table, update_databasebog, make_query)
def estado(table_name, conn):
casos = make_query(
f"SELECT ESTADO, count(*) as cantidad FROM {table_name} WHERE ESTADO not null GROUP BY ESTADO ORDER BY cantidad",
conn
)
_ , ax = plt.subplots()
ax.pie(casos['cantidad'], startangle=-90, autopct='%1.1f%%')
ax.legend(casos['ESTADO'], loc="upper center", ncol=2)
ax.set_title("Estado contagiados")
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.5)
plt.savefig('plots/estado')
plt.show()
def ubicacion(table_name, conn):
casos = make_query(
f"SELECT UBICACION, count(*) as cantidad FROM {table_name} WHERE UBICACION not null GROUP BY UBICACION ORDER BY cantidad",
conn
)
_ , ax = plt.subplots()
ax.pie(casos['cantidad'], wedgeprops=dict(width=0.5), startangle=-40, autopct='%1.1f%%')
ax.legend(casos['UBICACION'], loc="upper center", ncol=2)
ax.set_title("Ubicacion contagiados")
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.5)
plt.savefig('plots/ubicacion')
#plt.show()
def sexo_edad(table_name, conn):
rango_edad = ['0-9', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79', '80-...']
casosf = make_query(
f"""SELECT SUM(CASE WHEN EDAD <= 9.0 THEN 1 ELSE 0 END) AS [0-9],
SUM(CASE WHEN EDAD BETWEEN 10.0 AND 19.0 THEN 1 ELSE 0 END) AS [10-19],
SUM(CASE WHEN EDAD BETWEEN 20.0 AND 29.0 THEN 1 ELSE 0 END) AS [20-29],
SUM(CASE WHEN EDAD BETWEEN 30.0 AND 39.0 THEN 1 ELSE 0 END) AS [30-39],
SUM(CASE WHEN EDAD BETWEEN 40.0 AND 49.0 THEN 1 ELSE 0 END) AS [40-49],
SUM(CASE WHEN EDAD BETWEEN 50.0 AND 59.0 THEN 1 ELSE 0 END) AS [50-59],
SUM(CASE WHEN EDAD BETWEEN 60.0 AND 69.0 THEN 1 ELSE 0 END) AS [60-69],
SUM(CASE WHEN EDAD BETWEEN 70.0 AND 79.0 THEN 1 ELSE 0 END) AS [70-79],
SUM(CASE WHEN EDAD >= 80 THEN 1 ELSE 0 END) AS [80-...]
FROM {table_name}
WHERE SEXO = 'F'""",
conn
)
casosm = make_query(
f"""SELECT SUM(CASE WHEN EDAD <= 9.0 THEN 1 ELSE 0 END) AS [0-9],
SUM(CASE WHEN EDAD BETWEEN 10.0 AND 19.0 THEN 1 ELSE 0 END) AS [10-19],
SUM(CASE WHEN EDAD BETWEEN 20.0 AND 29.0 THEN 1 ELSE 0 END) AS [20-29],
SUM(CASE WHEN EDAD BETWEEN 30.0 AND 39.0 THEN 1 ELSE 0 END) AS [30-39],
SUM(CASE WHEN EDAD BETWEEN 40.0 AND 49.0 THEN 1 ELSE 0 END) AS [40-49],
SUM(CASE WHEN EDAD BETWEEN 50.0 AND 59.0 THEN 1 ELSE 0 END) AS [50-59],
SUM(CASE WHEN EDAD BETWEEN 60.0 AND 69.0 THEN 1 ELSE 0 END) AS [60-69],
SUM(CASE WHEN EDAD BETWEEN 70.0 AND 79.0 THEN 1 ELSE 0 END) AS [70-79],
SUM(CASE WHEN EDAD >= 80 THEN 1 ELSE 0 END) AS [80-...]
FROM {table_name}
WHERE SEXO = 'M'""",
conn
)
f = casosf.values.tolist()
m = casosm.values.tolist()
x = np.arange(len(rango_edad))
width = 0.35
fig, ax = plt.subplots()
mujeres = ax.bar(x - width/2, f[0], width, label='Mujeres')
hombres = ax.bar(x + width/2, m[0], width, label='Hombres')
ax.set_ylabel('Contagios')
ax.set_title('Contagios por sexo y edad')
ax.set_xticks(x)
ax.set_xticklabels(rango_edad)
ax.legend()
fig.tight_layout()
fig.savefig('plots/sexo_edad.png')
#plt.show()
def localidad_sexo(table_name, conn):
casosf = make_query(
f"SELECT LOCALIDAD_ASIS, count(*) as cantidad FROM {table_name} WHERE LOCALIDAD_ASIS not null and SEXO='F' GROUP BY LOCALIDAD_ASIS ORDER BY cantidad",
conn
)
casosm = make_query(
f"SELECT LOCALIDAD_ASIS, count(*) as cantidad FROM {table_name} WHERE LOCALIDAD_ASIS not null and SEXO='M' GROUP BY LOCALIDAD_ASIS ORDER BY cantidad",
conn
)
y = np.arange(len(casosf['LOCALIDAD_ASIS']))
width = 0.35
fig, ax = plt.subplots()
mujeres = ax.barh(y - width/2, casosf['cantidad'], width, label='Mujeres')
hombres = ax.barh(y + width/2, casosm['cantidad'], width, label='Hombres')
ax.set_ylabel('Localidad')
ax.set_yticks(y)
ax.set_yticklabels(casosf['LOCALIDAD_ASIS'])
ax.legend()
plt.title('Contagiados por localidad y sexo')
fig.tight_layout()
fig.savefig('plots/localidad_sexo.png')
plt.show()
def localidad(table_name, conn):
casos = make_query(
f"SELECT LOCALIDAD_ASIS, count(*) as cantidad FROM {table_name} WHERE LOCALIDAD_ASIS not null GROUP BY LOCALIDAD_ASIS ORDER BY cantidad",
conn
)
casos.plot(x='LOCALIDAD_ASIS', y='cantidad', kind='barh', xlabel='Localidad', ylabel='contagios')
plt.title('Contagio por localidad')
plt.tight_layout()
plt.savefig('plots/localidades.png')
#plt.show()
def evolucion_casos(table_name, conn):
casos = make_query(
f"SELECT FECHA_DIAGNOSTICO, count(*) as cantidad FROM {table_name} WHERE FECHA_DIAGNOSTICO not null GROUP BY FECHA_DIAGNOSTICO ORDER BY FECHA_DIAGNOSTICO",
conn
)
casos['FECHA_DIAGNOSTICO'] = | pd.to_datetime(casos['FECHA_DIAGNOSTICO']) | pandas.to_datetime |
import logging
import os
import re
import shutil
from datetime import datetime
from itertools import combinations
from random import randint
import numpy as np
import pandas as pd
import psutil
import pytest
from dask import dataframe as dd
from distributed.utils_test import cluster
from tqdm import tqdm
import featuretools as ft
from featuretools import EntitySet, Timedelta, calculate_feature_matrix, dfs
from featuretools.computational_backends import utils
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE,
_chunk_dataframe_groups,
_handle_chunk_size,
scatter_warning
)
from featuretools.computational_backends.utils import (
bin_cutoff_times,
create_client_and_cluster,
n_jobs_to_workers
)
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
IdentityFeature
)
from featuretools.primitives import (
Count,
Max,
Min,
Percentile,
Sum,
TransformPrimitive
)
from featuretools.tests.testing_utils import (
backward_path,
get_mock_client_cluster,
to_pandas
)
from featuretools.utils.gen_utils import Library, import_or_none
ks = import_or_none('databricks.koalas')
def test_scatter_warning(caplog):
logger = logging.getLogger('featuretools')
match = "EntitySet was only scattered to {} out of {} workers"
warning_message = match.format(1, 2)
logger.propagate = True
scatter_warning(1, 2)
logger.propagate = False
assert warning_message in caplog.text
# TODO: final assert fails w/ Dask
def test_calc_feature_matrix(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed dataframe result not ordered')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times, es['log'].index: instances})
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
verbose=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
error_text = 'features must be a non-empty list of features'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix('features', es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([], es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([1, 2, 3], es, cutoff_time=cutoff_time)
error_text = "cutoff_time times must be datetime type: try casting via "\
"pd\\.to_datetime\\(\\)"
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=17)
error_text = 'cutoff_time must be a single value or DataFrame'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=times)
cutoff_times_dup = pd.DataFrame({'time': [datetime(2018, 3, 1),
datetime(2018, 3, 1)],
es['log'].index: [1, 1]})
error_text = 'Duplicated rows in cutoff time dataframe.'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
entityset=es,
cutoff_time=cutoff_times_dup)
cutoff_reordered = cutoff_time.iloc[[-1, 10, 1]] # 3 ids not ordered by cutoff time
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_reordered,
verbose=True)
assert all(feature_matrix.index == cutoff_reordered["id"].values)
# fails with Dask and Koalas entitysets: the cutoff time is not reordered, so the out-of-order case cannot be verified
# - all values come back False, so we cannot tell from position alone whether the result is wrong or merely ordered differently
def test_cfm_warns_dask_cutoff_time(es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times,
es['log'].index: instances})
cutoff_time = dd.from_pandas(cutoff_time, npartitions=4)
property_feature = ft.Feature(es['log']['value']) > 10
match = "cutoff_time should be a Pandas DataFrame: " \
"computing cutoff_time, this may take a while"
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_cfm_compose(es, lt):
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
verbose=True)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_compose_approximate(es, lt):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('dask does not support approximate')
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
approximate='1s',
verbose=True)
assert(type(feature_matrix) == pd.core.frame.DataFrame)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_dask_compose(dask_es, lt):
property_feature = ft.Feature(dask_es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
dask_es,
cutoff_time=lt,
verbose=True)
feature_matrix = feature_matrix.compute()
assert (feature_matrix[property_feature.get_name()] == feature_matrix['label_func']).values.all()
# tests approximate, skip for dask/koalas
def test_cfm_approximate_correct_ordering():
trips = {
'trip_id': [i for i in range(1000)],
'flight_time': [datetime(1998, 4, 2) for i in range(350)] + [datetime(1997, 4, 3) for i in range(650)],
'flight_id': [randint(1, 25) for i in range(1000)],
'trip_duration': [randint(1, 999) for i in range(1000)]
}
df = pd.DataFrame.from_dict(trips)
es = EntitySet('flights')
es.entity_from_dataframe("trips",
dataframe=df,
index="trip_id",
time_index='flight_time')
es.normalize_entity(base_entity_id="trips",
new_entity_id="flights",
index="flight_id",
make_time_index=True)
features = dfs(entityset=es, target_entity='trips', features_only=True)
flight_features = [feature for feature in features
if isinstance(feature, DirectFeature) and
isinstance(feature.base_features[0],
AggregationFeature)]
property_feature = IdentityFeature(es['trips']['trip_id'])
cutoff_time = pd.DataFrame.from_dict({'instance_id': df['trip_id'],
'time': df['flight_time']})
time_feature = IdentityFeature(es['trips']['flight_time'])
feature_matrix = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix.index.names = ['instance', 'time']
assert(np.all(feature_matrix.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix[['trip_id', 'flight_time']].values))
feature_matrix_2 = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
approximate=Timedelta(2, 'd'))
feature_matrix_2.index.names = ['instance', 'time']
assert(np.all(feature_matrix_2.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix_2[['trip_id', 'flight_time']].values))
for column in feature_matrix:
for x, y in zip(feature_matrix[column], feature_matrix_2[column]):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
# uses approximate, skip for dask/koalas entitysets
def test_cfm_no_cutoff_time_index(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat4 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat4, pd_es['sessions'])
cutoff_time = pd.DataFrame({
'time': [datetime(2013, 4, 9, 10, 31, 19), datetime(2013, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(12, 's'),
cutoff_time=cutoff_time)
assert feature_matrix.index.name == 'id'
assert feature_matrix.index.values.tolist() == [0, 2]
assert feature_matrix[dfeat.get_name()].tolist() == [10, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
cutoff_time = pd.DataFrame({
'time': [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix_2 = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix_2.index.name == 'id'
assert feature_matrix_2.index.tolist() == [0, 2]
assert feature_matrix_2[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix_2[agg_feat.get_name()].tolist() == [5, 1]
# TODO: fails with dask entitysets
# TODO: fails with koalas entitysets
def test_cfm_duplicated_index_in_cutoff_time(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed results not ordered, missing duplicates')
times = [datetime(2011, 4, 1), datetime(2011, 5, 1),
datetime(2011, 4, 1), datetime(2011, 5, 1)]
instances = [1, 1, 2, 2]
property_feature = ft.Feature(es['log']['value']) > 10
cutoff_time = pd.DataFrame({'id': instances, 'time': times},
index=[1, 1, 1, 1])
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
chunk_size=1)
assert (feature_matrix.shape[0] == cutoff_time.shape[0])
# TODO: fails with Dask, Koalas
def test_saveprogress(es, tmpdir):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('saveprogress fails with distributed entitysets')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = ft.Feature(es['log']['value']) > 10
save_progress = str(tmpdir)
fm_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
save_progress=save_progress)
_, _, files = next(os.walk(save_progress))
files = [os.path.join(save_progress, file) for file in files]
# there are 17 datetime files created above
assert len(files) == 17
list_df = []
for file_ in files:
df = pd.read_csv(file_, index_col="id", header=0)
list_df.append(df)
merged_df = pd.concat(list_df)
merged_df.set_index(pd.DatetimeIndex(times), inplace=True, append=True)
fm_no_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
assert np.all((merged_df.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (merged_df.sort_index().values))
shutil.rmtree(save_progress)
def test_cutoff_time_correctly(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
labels = [10, 5, 0]
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_binning():
cutoff_time = pd.DataFrame({
'time': [
datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1)
],
'instance_id': [1, 2, 3]
})
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(4, 'h'))
labels = [datetime(2011, 4, 9, 12),
datetime(2011, 4, 10, 8),
datetime(2011, 4, 10, 12)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(25, 'h'))
labels = [datetime(2011, 4, 8, 22),
datetime(2011, 4, 9, 23),
datetime(2011, 4, 9, 23)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
error_text = "Unit is relative"
with pytest.raises(ValueError, match=error_text):
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(1, 'mo'))
def test_training_window_fails_dask(dask_es):
property_feature = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['customers'],
primitive=Count)
error_text = "Using training_window is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([property_feature],
dask_es,
training_window='2 hours')
def test_cutoff_time_columns_order(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
id_col_names = ['instance_id', es['customers'].index]
time_col_names = ['time', es['customers'].time_index]
for id_col in id_col_names:
for time_col in time_col_names:
cutoff_time = pd.DataFrame({'dummy_col_1': [1, 2, 3],
id_col: [0, 1, 2],
'dummy_col_2': [True, False, False],
time_col: times})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
labels = [10, 5, 0]
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_df_redundant_column_names(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({es['customers'].index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "instance_id" and a column' \
' with the same name as the target entity index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
cutoff_time = pd.DataFrame({es['customers'].time_index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "time" and a column' \
' with the same name as the target entity time index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_training_window(pd_es):
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
# make sure we include a feature that is a direct feature of a higher-level agg
# so we have multiple "filter eids" in get_pandas_data_slice,
# and we go through the loop to pull data with a training_window param more than once
dagg = DirectFeature(top_level_agg, pd_es['customers'])
# for now, warns if last_time_index not present
times = [datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
warn_text = "Using training_window but last_time_index is not set on entity customers"
with pytest.warns(UserWarning, match=warn_text):
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours')
pd_es.add_last_time_indexes()
error_text = 'Training window cannot be in observations'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
pd_es,
cutoff_time=cutoff_time,
training_window=Timedelta(2, 'observations'))
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True)
prop_values = [4, 5, 1]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False)
prop_values = [5, 5, 2]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case3. include_cutoff_time = False with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-09 10:40:00"),
training_window='9 minutes',
include_cutoff_time=False)
prop_values = [0, 4, 0]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case4. include_cutoff_time = True with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-10 10:40:00"),
training_window='2 days',
include_cutoff_time=True)
prop_values = [0, 10, 1]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
def test_training_window_overlap(pd_es):
pd_es.add_last_time_indexes()
count_log = ft.Feature(
base=pd_es['log']['id'],
parent_entity=pd_es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:40:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=True,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [1, 9])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=False,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [0, 9])
def test_include_cutoff_time_without_training_window(es):
es.add_last_time_indexes()
count_log = ft.Feature(
base=es['log']['id'],
parent_entity=es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:31:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [1, 6])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [0, 5])
# Case3. include_cutoff_time = True with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [6])
# Case4. include_cutoff_time = False with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [5])
def test_approximate_dfeat_of_agg_on_target_include_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_time = pd.DataFrame({'time': [datetime(2011, 4, 9, 10, 31, 19)], 'instance_id': [0]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat2, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=False)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# excluded due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [5]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=True)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# included due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [6]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
def test_training_window_recent_time_index(pd_es):
# customer with no sessions
row = {
'id': [3],
'age': [73],
u'région_id': ['United States'],
'cohort': [1],
'cancel_reason': ["Lost interest"],
'loves_ice_cream': [True],
'favorite_quote': ["Don't look back. Something might be gaining on you."],
'signup_date': [datetime(2011, 4, 10)],
'upgrade_date': [datetime(2011, 4, 12)],
'cancel_date': [datetime(2011, 5, 13)],
'date_of_birth': [datetime(1938, 2, 1)],
'engagement_level': [2],
}
to_add_df = pd.DataFrame(row)
to_add_df.index = range(3, 4)
# have to convert category to int in order to concat
old_df = pd_es['customers'].df
old_df.index = old_df.index.astype("int")
old_df["id"] = old_df["id"].astype(int)
df = pd.concat([old_df, to_add_df], sort=True)
# convert back after
df.index = df.index.astype("category")
df["id"] = df["id"].astype("category")
pd_es['customers'].update_data(df=df, recalculate_last_time_indexes=False)
pd_es.add_last_time_indexes()
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dagg = DirectFeature(top_level_agg, pd_es['customers'])
instance_ids = [0, 1, 2, 3]
times = [datetime(2011, 4, 9, 12, 31), datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1), datetime(2011, 4, 10, 1, 59, 59)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': instance_ids})
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True,
)
prop_values = [4, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False,
)
prop_values = [5, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# TODO: add test to fail w/ koalas
def test_approximate_fails_dask(dask_es):
agg_feat = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['sessions'],
primitive=Count)
error_text = "Using approximate is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([agg_feat],
dask_es,
approximate=Timedelta(1, 'week'))
def test_approximate_multiple_instances_per_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix.shape[0] == 2
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_with_multiple_paths(pd_diamond_es):
pd_es = pd_diamond_es
path = backward_path(pd_es, ['regions', 'customers', 'transactions'])
agg_feat = ft.AggregationFeature(pd_es['transactions']['id'],
parent_entity=pd_es['regions'],
relationship_path=path,
primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [6, 2]
def test_approximate_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
instance_ids=[0, 2],
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_dfeat_of_need_all_values(pd_es):
p = ft.Feature(pd_es['log']['value'], primitive=Percentile)
agg_feat = ft.Feature(p, parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
log_df = pd_es['log'].df
instances = [0, 2]
cutoffs = [pd.Timestamp('2011-04-09 10:31:19'), pd.Timestamp('2011-04-09 11:00:00')]
approxes = [pd.Timestamp('2011-04-09 10:31:10'), pd.Timestamp('2011-04-09 11:00:00')]
true_vals = []
true_vals_approx = []
for instance, cutoff, approx in zip(instances, cutoffs, approxes):
log_data_cutoff = log_df[log_df['datetime'] < cutoff]
log_data_cutoff['percentile'] = log_data_cutoff['value'].rank(pct=True)
true_agg = log_data_cutoff.loc[log_data_cutoff['session_id'] == instance, 'percentile'].fillna(0).sum()
true_vals.append(round(true_agg, 3))
log_data_approx = log_df[log_df['datetime'] < approx]
log_data_approx['percentile'] = log_data_approx['value'].rank(pct=True)
true_agg_approx = log_data_approx.loc[log_data_approx['session_id'].isin([0, 1, 2]), 'percentile'].fillna(0).sum()
true_vals_approx.append(round(true_agg_approx, 3))
lapprox = [round(x, 3) for x in feature_matrix[dfeat.get_name()].tolist()]
test_list = [round(x, 3) for x in feature_matrix[agg_feat.get_name()].tolist()]
assert lapprox == true_vals_approx
assert test_list == true_vals
def test_uses_full_entity_feat_of_approximate(pd_es):
agg_feat = ft.Feature(pd_es['log']['value'], parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
agg_feat3 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Max)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
dfeat2 = DirectFeature(agg_feat3, pd_es['sessions'])
p = ft.Feature(dfeat, primitive=Percentile)
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
# only dfeat2 should be approximated
# because Percentile needs all values
feature_matrix_only_dfeat2 = calculate_feature_matrix(
[dfeat2],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == [50, 50]
feature_matrix_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == feature_matrix_approx[dfeat2.get_name()].tolist()
feature_matrix_small_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
approximate=Timedelta(10, 'ms'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix_no_approx = calculate_feature_matrix(
[p, dfeat, dfeat2, agg_feat],
pd_es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
for f in [p, dfeat, agg_feat]:
for fm1, fm2 in combinations([feature_matrix_approx,
feature_matrix_small_approx,
feature_matrix_no_approx], 2):
assert fm1[f.get_name()].tolist() == fm2[f.get_name()].tolist()
def test_approximate_dfeat_of_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(ft.Feature(agg_feat2, pd_es["sessions"]), pd_es['log'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
def test_empty_path_approximate_full(pd_es):
pd_es['sessions'].df['customer_id'] = pd.Series([np.nan, np.nan, np.nan, 1, 1, 2], dtype="category")
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
vals1 = feature_matrix[dfeat.get_name()].tolist()
assert (vals1[0] == 0)
assert (vals1[1] == 0)
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
# todo: do we need to test this situation?
# def test_empty_path_approximate_partial(pd_es):
# pd_es = copy.deepcopy(pd_es)
# pd_es['sessions'].df['customer_id'] = pd.Categorical([0, 0, np.nan, 1, 1, 2])
# agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
# agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
# dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
# times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
# cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
# feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
# pd_es,
# approximate=Timedelta(10, 's'),
# cutoff_time=cutoff_time)
# vals1 = feature_matrix[dfeat.get_name()].tolist()
# assert vals1[0] == 7
# assert np.isnan(vals1[1])
# assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approx_base_feature_is_also_first_class_feature(pd_es):
log_to_products = DirectFeature(pd_es['products']['rating'], pd_es['log'])
# This should still be computed properly
agg_feat = ft.Feature(log_to_products, parent_entity=pd_es['sessions'], primitive=Min)
customer_agg_feat = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
# This is to be approximated
sess_to_cust = DirectFeature(customer_agg_feat, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([sess_to_cust, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
vals1 = feature_matrix[sess_to_cust.get_name()].tolist()
assert vals1 == [8.5, 7]
vals2 = feature_matrix[agg_feat.get_name()].tolist()
assert vals2 == [4, 1.5]
def test_approximate_time_split_returns_the_same_result(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:07:30'),
pd.Timestamp('2011-04-09 10:07:40')],
'instance_id': [0, 0]})
feature_matrix_at_once = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
divided_matrices = []
separate_cutoff = [cutoff_df.iloc[0:1], cutoff_df.iloc[1:]]
# Make sure indexes are different
# Note that this step is unnecessary and is done only to showcase the issue here
separate_cutoff[0].index = [0]
separate_cutoff[1].index = [1]
for ct in separate_cutoff:
fm = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=ct)
divided_matrices.append(fm)
feature_matrix_from_split = pd.concat(divided_matrices)
assert feature_matrix_from_split.shape == feature_matrix_at_once.shape
for i1, i2 in zip(feature_matrix_at_once.index, feature_matrix_from_split.index):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
for c in feature_matrix_from_split:
for i1, i2 in zip(feature_matrix_at_once[c], feature_matrix_from_split[c]):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
def test_approximate_returns_correct_empty_default_values(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['sessions'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 11:00:00'),
pd.Timestamp('2011-04-09 11:00:00')],
'instance_id': [0, 0]})
fm = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
assert fm[dfeat.get_name()].tolist() == [0, 10]
# def test_approximate_deep_recurse(pd_es):
# pd_es = pd_es
# agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
# dfeat1 = DirectFeature(agg_feat, pd_es['sessions'])
# agg_feat2 = Sum(dfeat1, pd_es['customers'])
# dfeat2 = DirectFeature(agg_feat2, pd_es['sessions'])
# agg_feat3 = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['products'], primitive=Count)
# dfeat3 = DirectFeature(agg_feat3, pd_es['log'])
# agg_feat4 = Sum(dfeat3, pd_es['sessions'])
# feature_matrix = calculate_feature_matrix([dfeat2, agg_feat4],
# pd_es,
# instance_ids=[0, 2],
# approximate=Timedelta(10, 's'),
# cutoff_time=[datetime(2011, 4, 9, 10, 31, 19),
# datetime(2011, 4, 9, 11, 0, 0)])
# # dfeat2 and agg_feat4 should both be approximated
def test_approximate_child_aggs_handled_correctly(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
agg_feat_2 = ft.Feature(pd_es['log']['value'], parent_entity=pd_es['customers'], primitive=Sum)
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 10:30:00'),
pd.Timestamp('2011-04-09 10:30:06')],
'instance_id': [0, 0]})
fm = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
fm_2 = calculate_feature_matrix([dfeat, agg_feat_2],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_df)
assert fm[dfeat.get_name()].tolist() == [2, 3]
assert fm_2[agg_feat_2.get_name()].tolist() == [0, 5]
def test_cutoff_time_naming(es):
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-08 10:30:00'),
pd.Timestamp('2011-04-09 10:30:06')],
'instance_id': [0, 0]})
cutoff_df_index_name = cutoff_df.rename(columns={"instance_id": "id"})
cutoff_df_wrong_index_name = cutoff_df.rename(columns={"instance_id": "wrong_id"})
cutoff_df_wrong_time_name = cutoff_df.rename(columns={"time": "cutoff_time"})
fm1 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
fm1 = to_pandas(fm1, index='id', sort_index=True)
fm2 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_index_name)
fm2 = to_pandas(fm2, index='id', sort_index=True)
assert all((fm1 == fm2.values).values)
error_text = 'Cutoff time DataFrame must contain a column with either the same name' \
' as the target entity index or a column named "instance_id"'
with pytest.raises(AttributeError, match=error_text):
calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_index_name)
time_error_text = 'Cutoff time DataFrame must contain a column with either the same name' \
' as the target entity time_index or a column named "time"'
with pytest.raises(AttributeError, match=time_error_text):
calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_time_name)
# TODO: order doesn't match, but output matches
def test_cutoff_time_extra_columns(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed result not ordered')
agg_feat = ft.Feature(es['customers']['id'], parent_entity=es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, es['customers'])
cutoff_df = pd.DataFrame({'time': [pd.Timestamp('2011-04-09 10:30:06'),
pd.Timestamp('2011-04-09 10:30:03'),
pd.Timestamp('2011-04-08 10:30:00')],
'instance_id': [0, 1, 0],
'label': [True, True, False]},
columns=['time', 'instance_id', 'label'])
fm = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
# check column was added to end of matrix
assert 'label' == fm.columns[-1]
assert (fm['label'].values == cutoff_df['label'].values).all()
def test_cutoff_time_extra_columns_approximate(pd_es):
agg_feat = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
cutoff_df = pd.DataFrame({'time': [ | pd.Timestamp('2011-04-09 10:30:06') | pandas.Timestamp |
import requests
from bs4 import BeautifulSoup
import pandas as pd
from difflib import SequenceMatcher
desired_width = 320
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', 10)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_colwidth', None) #To display full URL in dataframe
from datetime import datetime
def df_column_switch(df, column1, column2):
i = list(df.columns)
a, b = i.index(column1), i.index(column2)
i[b], i[a] = i[a], i[b]
df = df[i]
return df
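# example usage (hypothetical column names): df_column_switch(df, 'Role', 'Location') returns df with those two columns' positions swapped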
def create_jobsdf_greenhouse(company_name, url, save_to_excel = False):
'''
Scrape all job openings from a company's Greenhouse job board and collect,
for each opening, its title, location, job number and direct URL; the results
are assembled into a DataFrame and can optionally be saved to Excel.
'''
url = str(url)
company_name = str(company_name)
page = requests.get(url)
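# the board HTML is parsed assuming each top-level department is a <section class="level-0"> containing one <div class="opening"> per role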
sections = BeautifulSoup(page.text, 'html.parser').find_all('section', {'class': 'level-0'})
roles = []
role_urls = []
for section in sections:
#print(section)
for opening in section.find_all('div', {'class': 'opening'}):
#print(opening)
#print(' ')
role_title = opening.find('a').getText().strip()
role_location = opening.find('span', {'class': 'location'}).getText().strip()
partial_url = [elem.get('href') for elem in opening.find_all('a')][0]
if ((company_name == 'Optiver') or
(company_name == 'Glovo') or
(company_name == 'Graviton Research Capital')):
job_no = partial_url.split('/')[-1].split('=')[-1]
role_url = partial_url
elif company_name == 'Squarepoint Capital':
job_no = partial_url.split('/')[-1].split('=')[-1]
role_url = partial_url.split('?')[0] + '/job#' + job_no
else:
job_no = partial_url.split('/')[-1]
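# measure how much of the opening's href already overlaps the board URL and append only the non-overlapping tail, so the shared path segment is not duplicated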
common_size = SequenceMatcher(None, partial_url, url).get_matching_blocks()[0].size #U #.find_longest_match(0, len(partial_url), 0, len(url))
role_url = url + partial_url[common_size : ] #U
roles.append(role_title + ' - ' + role_location + ' - ' + job_no)
role_urls.append(role_url)
jobs_df = pd.DataFrame( | pd.Series(roles) | pandas.Series |
import pandas as pd
import numpy as np
import os
import math
import random
import pickle
from typing import List, Tuple
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from lightgbm import LGBMRegressor
from progress.bar import Bar
from prismx.utils import read_gmt, load_correlation, load_feature
from prismx.feature import load_features
def create_training_data(workdir: str, gmt_file: str, false_sample_count: int=50000) -> List:
correlation_files = os.listdir(workdir+"/correlation")
correlation = load_correlation(workdir, 0)
background_genes = list(correlation.columns)
library, rev_library, ugenes = read_gmt(gmt_file, background_genes)
df_true = pd.DataFrame()
lk = list(range(0, len(correlation_files)-1))
lk.append("global")
bar = Bar('Retrieve training data', max=2*len(lk))
for i in lk:
feature = load_feature(workdir, i)
features = []
keys = list(feature.columns)
setname = []
genename = []
for se in keys:
vals = library[se]
for val in vals:
setname.append(val)
genename.append(se)
features.append(feature.loc[val.encode('UTF-8'), se])
df_true.loc[:,i] = features
bar.next()
df_true2 = pd.concat([pd.DataFrame(genename), pd.DataFrame(setname),df_true, pd.DataFrame(np.ones(len(setname)))], axis=1)
samp_set = []
samp_gene = []
npw = np.array(df_true2.iloc[:, 0])
false_gene_count = math.ceil(false_sample_count/len(background_genes))
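# negative sampling: each iteration picks a random gene set and pairs it with random genes not annotated to that set, yielding roughly false_sample_count false (gene, set) examples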
for i in background_genes:
rkey = random.sample(keys,1)[0]
ww = np.where(npw == rkey)[0]
for j in range(0, false_gene_count):
rgene = random.sample(background_genes,1)[0]
if rgene not in df_true2.iloc[ww, 1]:
samp_set.append(rkey)
samp_gene.append(rgene)
df_false = pd.DataFrame()
Bar('Retrieve false samples ', max=len(lk))
for i in lk:
feature = load_feature(workdir, i)
features = []
setname = []
genename = []
for k in range(0,len(samp_set)):
se = samp_set[k]
val = samp_gene[k]
setname.append(se)
genename.append(val)
features.append(feature.loc[val.encode('UTF-8'), se])
df_false.loc[:,i] = features
bar.next()
df_false2 = pd.concat([ | pd.DataFrame(setname) | pandas.DataFrame |
import os, sys, inspect, itertools
sys.path.insert(1, os.path.join(sys.path[0], '../../'))
import numpy as np
import random
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import GradientBoostingRegressor, IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from scipy.stats import spearmanr
from core.concentration import *
from tqdm import tqdm
import pdb
def fix_randomness(seed=0):
np.random.seed(seed=seed)
random.seed(seed)
def get_data():
#df = pd.concat( [ pd.read_csv('./data/meps_19_reg.csv'), pd.read_csv('./data/meps_20_reg.csv'), pd.read_csv('./data/meps_21_reg.csv') ] )
df = | pd.read_csv('./data/meps_19_reg.csv') | pandas.read_csv |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module defines objects storing results from cross-correlation analysis.
"""
from __future__ import print_function
import numpy as np
import os
import pandas as pd
from tunacell.io import analysis
from tunacell.stats.utils import _dtype_converter
class BivariateConditioned(object):
"""Stores dynamics conditioned statistics for a couple of observables.
Parameters
----------
obss : couple of :class:`Observable` instances
times : couple of 1d arrays
each item refers to the array of times at which single obs conditioned
statistics have been evaluated. The row and column indices of created
matrices refers to the indices of first and second item respectively.
applied_filter : :class:`FilterSet` instance
"""
def __init__(self, bivariate, applied_filter=None):
self.bivariate = bivariate
self.applied_filter = applied_filter
if applied_filter is not None:
self.condition = repr(applied_filter)
else:
self.condition = 'master'
# alias
self.times = [uni.eval_times for uni in self.bivariate.univariates]
self.counts = None # 2-d array, sample counts at time t_i, t_j
self.cross = None # 2-d array, covariance matrix at (t_i, t_j)
self.std_dev = None # 2-d array, standard dev of covariances estimates
return
def as_dataframe(self):
# unroll
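# each (t_i, t_j) matrix entry becomes one row: every row time is repeated across all column times so counts, covariance and std flatten in matching order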
row_times = self.times[0]
col_times = self.times[1]
unroll_row_times = np.concatenate([np.array(len(col_times) * [rt, ]) for rt in row_times])
unroll_col_times = np.concatenate([col_times for rt in row_times])
counts = self.counts.flatten()
cov = self.cross.flatten()
std = self.std_dev.flatten()
names = ['time-row', 'time-col', 'counts', 'covariance', 'std-cov']
data = {'time-row': unroll_row_times,
'time-col': unroll_col_times,
'counts': counts,
'covariance': cov,
'std-cov' : std
}
return pd.DataFrame(data, columns=names)
def _get_path(self, user_root=None, write=False):
"""Get condition path"""
obs_path = self.bivariate._get_obs_path(user_root=user_root, write=write)
res = analysis.get_condition_path(obs_path, self.applied_filter, write=write)
index_condition, condition_path = res
return condition_path
def write_text(self, path=None):
# 2 columns for times (already stored elsewhere, but just in case)
cdt_path = self._get_path(user_root=path, write=True)
item_path = os.path.join(cdt_path, 'times.tsv')
with open(item_path, 'w') as f:
f.write('row:')
for t in self.times[0]:
f.write('\t{:.2f}'.format(t))
f.write('\n')
f.write('column:')
for t in self.times[1]:
f.write('\t{:.2f}'.format(t))
f.write('\n')
# matrix for counts
item_path = os.path.join(cdt_path, 'count_cross.tsv')
np.savetxt(item_path, self.counts, fmt='%d', delimiter='\t')
# matrix for cross-correlations
item_path = os.path.join(cdt_path, 'cross.tsv')
np.savetxt(item_path, self.cross, fmt='%.8e', delimiter='\t')
item_path = os.path.join(cdt_path, 'std_dev.tsv')
np.savetxt(item_path, self.std_dev, fmt='%.8e', delimiter='\t')
return
def read_text(self, path=None):
cdt_path = self._get_path(user_root=path, write=False)
item_path = os.path.join(cdt_path, 'times.tsv')
if not os.path.exists(item_path):
raise analysis.MissingFileError(item_path)
times = []
with open(item_path, 'r') as f:
for line in f.readlines():
times.append(list(map(float, line.rstrip().split('\t')[1:])))
self.times = times
# matrix for counts
item_path = os.path.join(cdt_path, 'count_cross.tsv')
if not os.path.exists(item_path):
raise analysis.MissingFileError(item_path)
arr = np.genfromtxt(item_path, dtype='i8', delimiter='\t')
self.counts = arr
# matrix for cross-correlations
item_path = os.path.join(cdt_path, 'cross.tsv')
if not os.path.exists(item_path):
raise analysis.MissingFileError(item_path)
arr = np.genfromtxt(item_path, dtype='f8', delimiter='\t')
self.cross = arr
# matrix for standard deviation of covariance estimates
item_path = os.path.join(cdt_path, 'std_dev.tsv')
if not os.path.exists(item_path):
raise analysis.MissingFileError(item_path)
arr = np.genfromtxt(item_path, dtype='f8', delimiter='\t')
self.std_dev = arr
pass
def compute_stationary(self, indexify, tmin=None, tmax=None):
"""Computes stationary cross-correlation between tmin and tmax.
DEPRECATED
"""
# Narrow down valid times?
time1 = self.single[str(self.obs[0])].time
time2 = self.single[str(self.obs[1])].time
rec = {}
for index, t1 in enumerate(time1):
if tmin is not None and t1 < tmin:
continue
if tmax is not None and t1 >= tmax:
continue
for jindex, t2 in enumerate(time2):
if tmin is not None and t2 < tmin:
continue
if tmax is not None and t2 >= tmax:
continue
dt = t2 - t1
sdt = indexify.indexify(dt)
if sdt not in rec.keys():
rec[sdt] = [0, 0.]
# Hum, I may have made a mistake, comment it:
dcount = self.count_cross[index, jindex]
rec[sdt][0] += dcount
rec[sdt][1] += dcount * self.cross[index, jindex]
dt_array = np.array(sorted(map(indexify.desindexify, rec.keys())))
count_array = np.zeros(len(dt_array), dtype='u4')
corr_array = np.zeros(len(dt_array), dtype='f8')
for index, dt in enumerate(dt_array):
sdt = indexify.indexify(dt)
count_array[index] = rec[sdt][0]
corr_array[index] = rec[sdt][1]/rec[sdt][0]
array = np.zeros(len(dt_array), dtype=[('time_interval', 'f8'),
('count', 'u4'),
('cross-correlation', 'f8')])
array['time_interval'] = dt_array
array['count'] = count_array
array['cross-correlation'] = corr_array
return array
class BivariateError(Exception):
pass
class BivariateIOError(IOError):
pass
class Bivariate(object):
"""Stores dynamics statistics for a couple of observables.
Parameters
----------
row_univariate : :class:`Univariate`
corresponds to row in cross-correlation matrices
col_univariate : :class:`Univariate`
corresponds to colum in cross-correlation matrices
"""
def __init__(self, row_univariate, col_univariate):
# check whether exp instances match
s1, s2 = row_univariate, col_univariate
if s1.exp.abspath != s2.exp.abspath:
raise BivariateError('Experiments do not match')
if repr(s1.exp.fset) != repr(s2.exp.fset):
raise BivariateError('Filter sets do not match')
self.univariates = (row_univariate, col_univariate)
self.exp = s1.exp
# build common conditions
cset = []
for cdt in s1.cset:
if repr(cdt) in map(repr, s2.cset):
cset.append(cdt)
self.cset = cset
self._items = {}
self._condition_labels = ['master', ]
# alias
Bic = BivariateConditioned
self._items['master'] = Bic(self, applied_filter=None)
for cdt in cset:
lab = repr(cdt)
self._condition_labels.append(lab)
self._items[lab] = Bic(self, applied_filter=cdt)
return
def _get_obs_path(self, user_root=None, write=False):
"""Get observable path"""
obss = [univ.obs for univ in self.univariates]
exp = self.exp
fset = self.exp.fset
analysis_path = analysis.get_analysis_path(exp, user_abspath=user_root,
write=write)
res = analysis.get_filter_path(analysis_path, fset, write=write)
index_filter, filter_path = res
obs_path = analysis.get_biobservable_path(filter_path, obss, write=write)
return obs_path
def export_text(self, analysis_folder=None):
# write each condition
for key, val in self._items.items():
val.write_text(analysis_folder)
return
def import_from_text(self, analysis_folder=None):
# read each condition
try:
for key, val in self._items.items():
val.read_text(analysis_folder)
except (analysis.MissingFileError, analysis.MissingFolderError) as missing:
raise BivariateIOError(missing)
return
def __getitem__(self, key):
return self._items[key]
@property
def master(self):
"""There's always a master (no condition)"""
return self['master']
class StationaryBivariateConditioned(object):
"""Cross-correlation as a function of time difference, for a univariate cdtion.
Parameters
----------
obss : couple of :class:`Observable` instances
times : couple of 1d arrays
each item refers to the array of times at which univariate obs conditioned
statistics have been evaluated. The row and column indices of created
matrices refers to the indices of first and second item respectively.
applied_filter : :class:`FilterSet` instance
tmin : float (default None)
tmax : float (default None)
adjust_mean : str {'global', 'local'}
"""
def __init__(self, statbivariate, applied_filter=None, array=None):
self.statbivariate = statbivariate
self.basename = 'stationary_bivariate'
# add region label
self.basename += '_' + self.statbivariate.region.name
# add computation options
self.basename += '_' + self.statbivariate.options.as_string_code()
self.applied_filter = applied_filter
if applied_filter is not None:
self.condition = repr(applied_filter)
else:
self.condition = 'master'
self.array = array # should be a 3 columns array
return
def as_dataframe(self):
return pd.DataFrame(self.array)
def _get_path(self, user_root=None, write=False):
"""Get condition path"""
obs_path = self.statbivariate._get_obs_path(user_root=user_root, write=write)
res = analysis.get_condition_path(obs_path, self.applied_filter, write=write)
index_condition, condition_path = res
return condition_path
def write_text(self, path='.'):
"""Write array to file."""
# get condition p
cdt_path = self._get_path(user_root=path, write=True)
if self.array is None:
print('Nothing to write')
return
ffmt = '%.8e' # floating point numbers
ifmt = '%d' # integers
item_path = os.path.join(cdt_path, self.basename + '.tsv')
names = self.array.dtype.names
header = '\t'.join(names)
fmt = [ifmt if 'count' in n_ else ffmt for n_ in names]
np.savetxt(item_path, self.array, fmt=fmt,
delimiter='\t', comments='', header=header)
return
def read_text(self, path='.'):
"""Initialize object by reading text output."""
cdt_path = self._get_path(user_root=path, write=False)
item_path = os.path.join(cdt_path, self.basename + '.tsv')
if not os.path.exists(item_path):
raise analysis.MissingFileError(item_path)
arr = np.genfromtxt(item_path, delimiter='\t', names=True)
self.array = arr
return
@property
def time(self):
if self.array is not None:
return self.array['time_interval']
else:
return None
@property
def count(self):
if self.array is not None:
return self.array['count']
else:
return None
@property
def crosscorr(self):
if self.array is not None:
return self.array['cross_correlation']
else:
return None
class StationaryBivariateIOError(IOError):
pass
class StationaryBivariate(object):
"""Cross-correlation analysis at stationarity
Parameters
----------
row_univariate : :class:`Univariate` instance
col_univariate : :class:`Univariate` instance
region : object with tmin, tmax, name attributes
determine where the process is (hypothetically) stationary
options : :class:`CompuParams` instance
"""
def __init__(self, row_univariate, col_univariate,
region=None, options=None):
self.region = region
self.options = options
s1, s2 = row_univariate, col_univariate
# obss = [univariate.obs for univariate in univariates]
if s1.exp.abspath != s2.exp.abspath:
raise BivariateError('Experiments do not match')
if repr(s1.exp.fset) != repr(s2.exp.fset):
raise BivariateError('Filter sets do not match')
self.univariates = (row_univariate, col_univariate)
self.label = self.region.name
self.tmin = self.region.tmin
self.tmax = self.region.tmax
self.adjust_mean = self.options.adjust_mean
self.disjoint = self.options.disjoint
self.exp = s1.exp
# build common conditions
cset = []
for cdt in s1.cset:
if repr(cdt) in map(repr, s2.cset):
cset.append(cdt)
self.cset = cset
self.dataframe = None # to be updated with pandas.DataFrame object
self._condition_labels = ['master', ]
self._items = {}
# alias
SBic = StationaryBivariateConditioned
self._items['master'] = SBic(self, applied_filter=None, array=None)
for cdt in cset:
self._condition_labels.append(repr(cdt))
self._items[repr(cdt)] = SBic(self, applied_filter=cdt, array=None)
return
def _get_obs_path(self, user_root=None, write=False):
"""Get observable path"""
obss = [univ.obs for univ in self.univariates]
exp = self.exp
fset = self.exp.fset
analysis_path = analysis.get_analysis_path(exp, user_abspath=user_root,
write=write)
res = analysis.get_filter_path(analysis_path, fset, write=write)
index_filter, filter_path = res
obs_path = analysis.get_biobservable_path(filter_path, obss, write=write)
return obs_path
def export_text(self, analysis_folder=None):
# write each condition
try:
for key, val in self._items.items():
val.write_text(analysis_folder)
# when not possible it means single object has not been exported yet
except analysis.MissingFolderError:
for uni in self.univariates:
uni.export_text(analysis_folder)
for key, val in self._items.items():
val.write_text(analysis_folder)
# export dataframe as csv file
if self.dataframe is not None:
exp = self.exp
fset = self.exp.fset
analysis_path = analysis.get_analysis_path(exp,
user_abspath=analysis_folder,
write=True)
res = analysis.get_filter_path(analysis_path, fset, write=True)
index_filter, filter_path = res
o1, o2 = [uni.obs for uni in self.univariates]
basename = 'data_{}_{}---{}'.format(self.label, o1.name, o2.name)
text_file = os.path.join(filter_path, basename + '.csv')
self.dataframe.to_csv(text_file, index=False)
return
def import_from_text(self, analysis_folder=None):
try:
for key, val in self._items.items():
val.read_text(analysis_folder)
exp = self.exp
fset = self.exp.fset
analysis_path = analysis.get_analysis_path(exp, user_abspath=analysis_folder,
write=False)
res = analysis.get_filter_path(analysis_path, fset, write=False)
index_filter, filter_path = res
o1, o2 = [uni.obs for uni in self.univariates]
basename = 'data_{}_{}---{}'.format(self.label, o1.name, o2.name)
text_file = os.path.join(filter_path, basename + '.csv')
if not os.path.exists(text_file):
raise analysis.MissingFileError
df = | pd.read_csv(text_file, index_col=False) | pandas.read_csv |
import logging
import pandas as pd
import numpy as np
from spaceone.core.manager import BaseManager
from spaceone.statistics.error import *
from spaceone.statistics.connector.service_connector import ServiceConnector
_LOGGER = logging.getLogger(__name__)
_JOIN_TYPE_MAP = {
'LEFT': 'left',
'RIGHT': 'right',
'OUTER': 'outer',
'INNER': 'inner'
}
_SUPPORTED_AGGREGATE_OPERATIONS = [
'query',
'join',
'concat',
'sort',
'formula',
'fill_na'
]
class ResourceManager(BaseManager):
def stat(self, aggregate, page, domain_id):
results = self._execute_aggregate_operations(aggregate, domain_id)
return self._page(page, results)
def _execute_aggregate_operations(self, aggregate, domain_id):
df = None
if 'query' not in aggregate[0]:
raise ERROR_REQUIRED_QUERY_OPERATION()
for stage in aggregate:
if 'query' in stage:
df = self._query(stage['query'], domain_id)
elif 'join' in stage:
df = self._join(stage['join'], domain_id, df)
elif 'concat' in stage:
df = self._concat(stage['concat'], domain_id, df)
elif 'sort' in stage:
df = self._sort(stage['sort'], df)
elif 'formula' in stage:
df = self._execute_formula(stage['formula'], df)
elif 'fill_na' in stage:
df = self._fill_na(stage['fill_na'], df)
else:
raise ERROR_REQUIRED_PARAMETER(key='aggregate.query | aggregate.join | aggregate.concat | '
'aggregate.sort | aggregate.formula | aggregate.fill_na')
df = df.replace({np.nan: None})
results = df.to_dict('records')
return results
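    # Hypothetical example of an 'aggregate' pipeline that
    # _execute_aggregate_operations accepts (resource types, keys and field
    # names below are illustrative, not taken from the original source):
    #   aggregate = [
    #       {'query': {'resource_type': 'inventory.Server', 'query': {...}}},
    #       {'join': {'resource_type': 'identity.Project', 'keys': ['project_id'],
    #                 'type': 'LEFT', 'query': {...}}},
    #       {'fill_na': {'data': {'server_count': 0}}},
    #       {'sort': {'key': 'server_count', 'desc': True}},
    #   ]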
@staticmethod
def _fill_na(options, base_df):
data = options.get('data', {})
if len(data.keys()) > 0:
base_df = base_df.fillna(data)
return base_df
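    # Hypothetical fill_na options (field name is illustrative):
    #   {'data': {'server_count': 0}}  -> replaces NaN in 'server_count' with 0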
def _execute_formula(self, options, base_df):
if len(base_df) > 0:
if 'eval' in options:
base_df = self._execute_formula_eval(options['eval'], base_df)
elif 'query' in options:
base_df = self._execute_formula_query(options['query'], base_df)
else:
raise ERROR_REQUIRED_PARAMETER(key='aggregate.formula.eval | aggregate.formula.query')
return base_df
@staticmethod
def _execute_formula_query(formula, base_df):
try:
base_df = base_df.query(formula)
except Exception as e:
            raise ERROR_STATISTICS_FORMULA(formula=formula) from e
return base_df
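    # Hypothetical query formula (column name is illustrative):
    #   'server_count > 10'  -> keeps only the rows where the expression is true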
@staticmethod
def _execute_formula_eval(formula, base_df):
try:
base_df = base_df.eval(formula)
except Exception as e:
            raise ERROR_STATISTICS_FORMULA(formula=formula) from e
return base_df
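    # Hypothetical eval formula (column names are illustrative):
    #   'usage_rate = used_count / total_count * 100'
    # DataFrame.eval with an assignment expression returns the frame with the
    # new column added.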
@staticmethod
def _sort(options, base_df):
if 'key' in options and len(base_df) > 0:
ascending = not options.get('desc', False)
try:
return base_df.sort_values(by=options['key'], ascending=ascending)
except Exception as e:
                raise ERROR_STATISTICS_QUERY(reason=f'Sorting failed. (sort = {options})') from e
else:
return base_df
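    # Hypothetical sort options (field name is illustrative):
    #   {'key': 'server_count', 'desc': True}
    #   -> base_df.sort_values(by='server_count', ascending=False)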
def _concat(self, options, domain_id, base_df):
concat_df = self._query(options, domain_id, operator='join')
try:
base_df = pd.concat([base_df, concat_df], ignore_index=True)
except Exception as e:
raise ERROR_STATISTICS_CONCAT(reason=str(e))
return base_df
@staticmethod
def _generate_empty_data(query):
empty_data = {}
        aggregate = query.get('aggregate', [])
        # walk the stages from last to first so the final 'group' stage defines
        # the empty columns, without mutating the caller's aggregate list
        for stage in reversed(aggregate):
if 'group' in stage:
group = stage['group']
for key in group.get('keys', []):
if 'name' in key:
empty_data[key['name']] = []
for field in group.get('fields', []):
if 'name' in field:
empty_data[field['name']] = []
break
return pd.DataFrame(empty_data)
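    # Hypothetical 'group' stage that _generate_empty_data would turn into an
    # empty frame with columns ['project_id', 'server_count'] (names illustrative):
    #   {'group': {'keys': [{'key': 'project_id', 'name': 'project_id'}],
    #              'fields': [{'operator': 'count', 'name': 'server_count'}]}}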
def _join(self, options, domain_id, base_df):
if 'type' in options and options['type'] not in _JOIN_TYPE_MAP:
raise ERROR_INVALID_PARAMETER_TYPE(key='aggregate.join.type', type=list(_JOIN_TYPE_MAP.keys()))
join_keys = options.get('keys')
join_type = options.get('type', 'LEFT')
join_df = self._query(options, domain_id, operator='join')
try:
if join_keys:
base_df = pd.merge(base_df, join_df, on=join_keys, how=_JOIN_TYPE_MAP[join_type])
else:
base_df = pd.merge(base_df, join_df, left_index=True, right_index=True, how=_JOIN_TYPE_MAP[join_type])
except Exception as e:
if join_keys is None:
raise ERROR_STATISTICS_INDEX_JOIN(reason=str(e))
else:
raise ERROR_STATISTICS_JOIN(resource_type=options['resource_type'], join_keys=join_keys)
return base_df
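    # When join keys are given the merge behaves like a SQL join on those
    # columns (e.g. keys=['project_id'], type='LEFT'); without keys the two
    # frames are aligned row by row on their integer index.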
def _query(self, options, domain_id, operator='query'):
resource_type = options.get('resource_type')
query = options.get('query')
extend_data = options.get('extend_data', {})
if resource_type is None:
raise ERROR_REQUIRED_PARAMETER(key=f'aggregate.{operator}.resource_type')
if query is None:
raise ERROR_REQUIRED_PARAMETER(key=f'aggregate.{operator}.query')
self.service_connector: ServiceConnector = self.locator.get_connector('ServiceConnector')
service, resource = self._parse_resource_type(resource_type)
try:
response = self.service_connector.stat_resource(service, resource, query, domain_id)
results = response.get('results', [])
if len(results) > 0 and not isinstance(results[0], dict):
df = pd.DataFrame(results, columns=['value'])
else:
df = | pd.DataFrame(results) | pandas.DataFrame |
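A small standalone sketch of the two branches above, with made-up payloads: scalar stat results become a single 'value' column, while a list of dicts becomes one column per key.

import pandas as pd

scalar_results = [3, 5, 8]
dict_results = [{'project_id': 'p1', 'server_count': 3},
                {'project_id': 'p2', 'server_count': 5}]

df_scalar = pd.DataFrame(scalar_results, columns=['value'])
df_dicts = pd.DataFrame(dict_results)

print(df_scalar.columns.tolist())  # ['value']
print(df_dicts.columns.tolist())   # ['project_id', 'server_count']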
# topic_modeling.py
# Topic model analysis for professional articles on machine intelligence.
import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
def buildAcademicData(data):
df = | pd.DataFrame() | pandas.DataFrame |
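A minimal sketch (topic names and weights are invented) of the kind of frame a function like buildAcademicData might return and how it could be rendered with Altair inside Streamlit; the real aggregation logic is not shown in the fragment above.

import altair as alt
import pandas as pd
import streamlit as st

# hypothetical topic weights; replace with the output of the real analysis
topics = pd.DataFrame({
    'topic': ['neural networks', 'ethics', 'robotics'],
    'weight': [0.42, 0.33, 0.25],
})
chart = alt.Chart(topics).mark_bar().encode(x='topic', y='weight')
st.altair_chart(chart, use_container_width=True)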