| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import itertools
import subprocess
import io
import re
import numpy as np
import pandas as pd
from monty.io import zopen
from monty.os.path import which
from monty.tempfile import ScratchDir
from pymatgen.core.periodic_table import get_el_sp
from veidt.abstract import Describer
from veidt.potential.processing import pool_from
class BispectrumCoefficients(Describer):
"""
Bispectrum coefficients to describe the local environment of each
atom in a quantitative way.
"""
def __init__(self, rcutfac, twojmax, element_profile, rfac0=0.99363,
rmin0=0, diagonalstyle=3, quadratic=False, pot_fit=False):
"""
Args:
rcutfac (float): Global cutoff distance.
twojmax (int): Band limit for bispectrum components.
element_profile (dict): Parameters (cutoff factor 'r' and
weight 'w') related to each element, e.g.,
{'Na': {'r': 0.3, 'w': 0.9},
'Cl': {'r': 0.7, 'w': 3.0}}
rfac0 (float): Parameter in distance to angle conversion.
Set between (0, 1), default to 0.99363.
rmin0 (float): Parameter in distance to angle conversion.
Default to 0.
diagonalstyle (int): Parameter defining which bispectrum
components are generated. Choose among 0, 1, 2 and 3,
default to 3.
            quadratic (bool): Whether to include quadratic terms.
                Default to False.
pot_fit (bool): Whether to output in potential fitting
format. Default to False, i.e., returning the bispectrum
coefficients for each site.
"""
from veidt.potential.lammps.calcs import SpectralNeighborAnalysis
self.calculator = SpectralNeighborAnalysis(rcutfac, twojmax,
element_profile,
rfac0, rmin0,
diagonalstyle,
quadratic)
self.rcutfac = rcutfac
self.twojmax = twojmax
self.element_profile = element_profile
self.rfac0 = rfac0
self.rmin0 = rmin0
self.diagonalstyle = diagonalstyle
self.elements = sorted(element_profile.keys(),
key=lambda sym: get_el_sp(sym).X)
self.quadratic = quadratic
self.pot_fit = pot_fit
@property
def subscripts(self):
"""
The subscripts (2j1, 2j2, 2j) of all bispectrum components
involved.
"""
return self.calculator.get_bs_subscripts(self.twojmax,
self.diagonalstyle)
def describe(self, structure, include_stress=False):
"""
Returns data for one input structure.
Args:
structure (Structure): Input structure.
include_stress (bool): Whether to include stress descriptors.
Returns:
DataFrame.
In regular format, the columns are the subscripts of
bispectrum components, while indices are the site indices
in input structure.
            In potential fitting format, to match the sequence
            [energy, f_x[0], f_y[0], ..., f_z[N], v_xx, ..., v_xy], the
            bispectrum coefficients are summed per species and normalized
            by the number of atoms (first row), while the derivatives in
            each direction are preserved, with the columns being the
            bispectrum subscripts for each species and the indices being
            [0, '0_x', '0_y', ..., 'N_z']. The virial contributions
            (in GPa) are summed over all atoms for each component, in the
            order ['xx', 'yy', 'zz', 'yz', 'xz', 'xy'].
"""
return self.describe_all([structure], include_stress).xs(0, level='input_index')
def describe_all(self, structures, include_stress=False):
"""
Returns data for all input structures in a single DataFrame.
Args:
structures (Structure): Input structures as a list.
include_stress (bool): Whether to include stress descriptors.
Returns:
DataFrame with indices of input list preserved. To retrieve
the data for structures[i], use
df.xs(i, level='input_index').
"""
columns = list(map(lambda s: '-'.join(['%d' % i for i in s]),
self.subscripts))
if self.quadratic:
columns += list(map(lambda s: '-'.join(['%d%d%d' % (i, j, k)
for i, j, k in s]),
itertools.combinations_with_replacement(self.subscripts, 2)))
raw_data = self.calculator.calculate(structures)
def process(output, combine, idx, include_stress):
b, db, vb, e = output
df = pd.DataFrame(b, columns=columns)
if combine:
df_add = pd.DataFrame({'element': e, 'n': np.ones(len(e))})
df_b = df_add.join(df)
n_atoms = df_b.shape[0]
b_by_el = [df_b[df_b['element'] == e] for e in self.elements]
sum_b = [df[df.columns[1:]].sum(axis=0) for df in b_by_el]
hstack_b = pd.concat(sum_b, keys=self.elements)
hstack_b = hstack_b.to_frame().T / n_atoms
hstack_b.fillna(0, inplace=True)
dbs = np.split(db, len(self.elements), axis=1)
dbs = np.hstack([np.insert(d.reshape(-1, len(columns)),
0, 0, axis=1) for d in dbs])
db_index = ['%d_%s' % (i, d)
for i in df_b.index for d in 'xyz']
df_db = pd.DataFrame(dbs, index=db_index,
columns=hstack_b.columns)
if include_stress:
vbs = np.split(vb.sum(axis=0), len(self.elements))
vbs = np.hstack([np.insert(v.reshape(-1, len(columns)),
0, 0, axis=1) for v in vbs])
volume = structures[idx].volume
vbs = vbs / volume * 160.21766208 # from eV to GPa
vb_index = ['xx', 'yy', 'zz', 'yz', 'xz', 'xy']
df_vb = pd.DataFrame(vbs, index=vb_index,
columns=hstack_b.columns)
df = pd.concat([hstack_b, df_db, df_vb])
else:
df = pd.concat([hstack_b, df_db])
return df
df = pd.concat([process(d, self.pot_fit, i, include_stress)
for i, d in enumerate(raw_data)],
keys=range(len(raw_data)), names=["input_index", None])
return df
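# Hedged usage sketch (not part of the original module): one way BispectrumCoefficients
# might be driven end to end. The cutoff, twojmax and element-profile values are
# illustrative assumptions, `structure` is any pymatgen Structure supplied by the
# caller, and LAMMPS must be available for the underlying SpectralNeighborAnalysis run.
def _example_bispectrum_usage(structure):
    profile = {'Na': {'r': 0.5, 'w': 0.9}, 'Cl': {'r': 0.5, 'w': 3.0}}
    bc = BispectrumCoefficients(rcutfac=5.0, twojmax=6, element_profile=profile)
    per_site = bc.describe(structure)  # rows: sites, columns: '2j1-2j2-2j' subscripts
    fitting = BispectrumCoefficients(rcutfac=5.0, twojmax=6, element_profile=profile,
                                     pot_fit=True).describe(structure)
    return per_site, fitting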
class AGNIFingerprints(Describer):
"""
Fingerprints for AGNI (Adaptive, Generalizable and Neighborhood
Informed) force field. Elemental systems only.
"""
def __init__(self, r_cut, etas):
"""
Args:
r_cut (float): Cutoff distance.
etas (numpy.array): All eta parameters in 1D array.
"""
self.r_cut = r_cut
self.etas = etas
def describe(self, structure):
"""
Calculate fingerprints for all sites in a structure.
Args:
structure (Structure): Input structure.
Returns:
DataFrame.
"""
all_neighbors = structure.get_all_neighbors(self.r_cut)
fingerprints = []
for i, an in enumerate(all_neighbors):
center = structure[i].coords
coords, distances = zip(*[(site.coords, d) for (site, d) in an])
v = (np.array(coords) - center)[:, :, None]
d = np.array(distances)[:, None, None]
e = np.array(self.etas)[None, None, :]
cf = 0.5 * (np.cos(np.pi * d / self.r_cut) + 1)
fpi = np.sum(v / d * np.exp(-(d / e) ** 2) * cf, axis=0)
fingerprints.append(fpi)
index = ["%d_%s" % (i, d) for i in range(len(structure))
for d in "xyz"]
df = pd.DataFrame(np.vstack(fingerprints), index=index,
columns=self.etas)
return df
def describe_all(self, structures):
return pd.concat([self.describe(s) for s in structures],
keys=range(len(structures)),
names=['input_index', None])
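# Hedged usage sketch (not part of the original module): AGNI fingerprints for a
# hypothetical elemental structure. The cutoff and eta grid below are illustrative
# assumptions; `structure` is a pymatgen Structure passed in by the caller, and the
# describer only supports single-element systems.
def _example_agni_usage(structure):
    etas = np.logspace(np.log10(0.8), np.log10(8.0), num=8)
    agni = AGNIFingerprints(r_cut=8.0, etas=etas)
    return agni.describe(structure)  # index: '<site>_<x|y|z>', columns: eta values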
class SOAPDescriptor(Describer):
"""
    Smooth Overlap of Atomic Positions (SOAP) descriptor.
"""
def __init__(self, cutoff, l_max=8, n_max=8, atom_sigma=0.5):
"""
Args:
cutoff (float): Cutoff radius.
            l_max (int): The band limit of the spherical harmonics basis
                functions. Default to 8.
            n_max (int): The number of radial basis functions. Default to 8.
            atom_sigma (float): The width of the Gaussian atomic density.
                Default to 0.5.
"""
from veidt.potential.soap import SOAPotential
self.cutoff = cutoff
self.l_max = l_max
self.n_max = n_max
self.atom_sigma = atom_sigma
self.operator = SOAPotential()
def describe(self, structure):
"""
Returns data for one input structure.
Args:
structure (Structure): Input structure.
"""
if not which('quip'):
raise RuntimeError("quip has not been found.\n",
"Please refer to https://github.com/libAtoms/QUIP for ",
"further detail.")
atoms_filename = 'structure.xyz'
exe_command = ['quip']
exe_command.append('atoms_filename={}'.format(atoms_filename))
descriptor_command = ['soap']
descriptor_command.append("cutoff" + '=' + '{}'.format(self.cutoff))
descriptor_command.append("l_max" + '=' + '{}'.format(self.l_max))
descriptor_command.append("n_max" + '=' + '{}'.format(self.n_max))
descriptor_command.append("atom_sigma" + '=' + '{}'.format(self.atom_sigma))
atomic_numbers = [str(num) for num in np.unique(structure.atomic_numbers)]
n_Z = len(atomic_numbers)
n_species = len(atomic_numbers)
Z = '{' + '{}'.format(' '.join(atomic_numbers)) + '}'
species_Z = '{' + '{}'.format(' '.join(atomic_numbers)) + '}'
descriptor_command.append("n_Z" + '=' + str(n_Z))
descriptor_command.append("Z" + '=' + Z)
descriptor_command.append("n_species" + '=' + str(n_species))
descriptor_command.append("species_Z" + '=' + species_Z)
exe_command.append("descriptor_str=" + "{" +
"{}".format(' '.join(descriptor_command)) + "}")
with ScratchDir('.'):
atoms_filename = self.operator.write_cfgs(filename=atoms_filename,
cfg_pool=pool_from([structure]))
descriptor_output = 'output'
p = subprocess.Popen(exe_command, stdout=open(descriptor_output, 'w'))
stdout = p.communicate()[0]
rc = p.returncode
if rc != 0:
error_msg = 'QUIP exited with return code %d' % rc
msg = stdout.decode("utf-8").split('\n')[:-1]
try:
error_line = [i for i, m in enumerate(msg)
if m.startswith('ERROR')][0]
error_msg += ', '.join([e for e in msg[error_line:]])
except Exception:
error_msg += msg[-1]
raise RuntimeError(error_msg)
with zopen(descriptor_output, 'rt') as f:
lines = f.read()
descriptor_pattern = re.compile('DESC(.*?)\n', re.S)
descriptors = pd.DataFrame([np.array(c.split(), dtype=np.float)
for c in descriptor_pattern.findall(lines)])
return descriptors
def describe_all(self, structures):
return pd.concat([self.describe(s) for s in structures],
keys=range(len(structures)),
names=['input_index', None])
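# Hedged usage sketch (not part of the original module): SOAP descriptors via QUIP.
# The hyperparameters are illustrative assumptions, and the `quip` binary must be on
# the PATH, otherwise describe() raises the RuntimeError above.
def _example_soap_usage(structure):
    soap = SOAPDescriptor(cutoff=5.0, l_max=8, n_max=8, atom_sigma=0.5)
    return soap.describe(structure)  # one row of SOAP components per atomic environment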
class BPSymmetryFunctions(Describer):
"""
Behler-Parrinello symmetry function descriptor.
"""
def __init__(self, dmin, cutoff, num_symm2, a_etas):
"""
Args:
dmin (float): The minimum interatomic distance accepted.
cutoff (float): Cutoff radius.
num_symm2 (int): The number of radial symmetry functions.
a_etas (list): The choice of η' in angular symmetry functions.
"""
from veidt.potential.nnp import NNPotential
self.dmin = dmin
self.cutoff = cutoff
self.num_symm2 = num_symm2
self.a_etas = a_etas
self.operator = NNPotential()
def describe(self, structure):
"""
Returns data for one input structure.
Args:
structure (Structure): Input structure.
"""
if not which('RuNNer'):
raise RuntimeError("RuNNer has not been found.")
if not which("RuNNerMakesym"):
raise RuntimeError("RuNNerMakesym has not been found.")
def read_functions_data(filename):
"""
Read structure features from file.
Args:
filename (str): The functions file to be read.
"""
with zopen(filename, 'rt') as f:
lines = f.read()
block_pattern = re.compile(r'(\n\s+\d+\n|^\s+\d+\n)(.+?)(?=\n\s+\d+\n|$)', re.S)
points_features = []
for (num_neighbor, block) in block_pattern.findall(lines):
point_features = pd.DataFrame([feature.split()[1:]
for feature in block.split('\n')[:-1]],
dtype=np.float32)
points_features.append(point_features)
points_features = pd.concat(points_features,
keys=range(len(block_pattern.findall(lines))),
names=['point_index', None])
return points_features
dmin = sorted(set(structure.distance_matrix.ravel()))[1]
r_etas = self.operator.generate_eta(dmin=self.dmin,
r_cut=self.cutoff,
num_symm2=self.num_symm2)
atoms_filename = 'input.data'
mode_output = 'mode.out'
with ScratchDir('.'):
atoms_filename = self.operator.write_cfgs(filename=atoms_filename,
cfg_pool=pool_from([structure]))
input_filename = self.operator.write_input(mode=1, r_cut=self.cutoff,
r_etas=r_etas, a_etas=self.a_etas,
scale_feature=False)
p = subprocess.Popen(['RuNNer'], stdout=open(mode_output, 'w'))
stdout = p.communicate()[0]
descriptors = read_functions_data('function.data')
return | pd.DataFrame(descriptors) | pandas.DataFrame |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
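    # Hedged illustration (not one of the original tests): how the `closed` fixture
    # changes endpoint membership for the breaks used by create_index above.
    def example_closed_membership(self):
        left_idx = IntervalIndex.from_breaks([0, 1, 2], closed='left')
        right_idx = IntervalIndex.from_breaks([0, 1, 2], closed='right')
        assert 0 in left_idx[0] and 1 not in left_idx[0]    # [0, 1)
        assert 1 in right_idx[0] and 0 not in right_idx[0]  # (0, 1]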
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
import time
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pathlib import Path
import context
from mhealth.utils.plotter_helper import save_figure
from mhealth.utils.commons import create_progress_bar
# Used if command-line option --parameters is not provided.
DEFAULT_PARAMETERS = ["Temperatur", "Herzfrequenz", "Atemfrequenz"]
# Data sources included in HF-AF_25052021.csv.
VALIDATION_DATA_SOURCES = ["WELCHALLYN_MONITOR", "PHILIPS_GATEWAY"]
# Half-ranges relevant for the validation: x +/- delta
DELTAS = {
"Atemfrequenz": 3, # ±3bpm
"Herzfrequenz": 10, # ±10bpm
"Temperatur": 0.5 # ±0.5°C
}
# Half-range for the timestamp delta, in minutes.
DELTA_TS = 2.5 # ±2.5min
# Devices are identified by the bed number they are used with.
# In case of device breakdown (or other problems), some devices
# were replaced by a device from another room. The lookup below
# specifies how the bed ids (devices) must be renamed, as well
# as the time range during which the replacement applies.
DEVICE_REPLACEMENT_LOOKUP = {
# Alias True From To
"2653F" : ("2655F", "2021-05-14 12:00:00+02:00", None),
"2652F" : ("2656FL", "2021-05-18 00:00:00+02:00", None),
"2661TL" : ("2661FL", "2021-05-20 00:00:00+02:00", None),
"2664T" : ("2664F", "2021-05-12 00:00:00+02:00", None),
"2665T" : ("2665F", None, "2021-05-19 10:30:00+02:00"),
}
# Expected value ranges per vital parameter.
VALUE_RANGES = {
"Atemfrequenz": [0, 35],
"Herzfrequenz": [30, 130],
"Temperatur": [35, 40],
}
BIN_WIDTHS = {
"Atemfrequenz": 0.5,
"Herzfrequenz": 1,
"Temperatur": 0.01,
}
BIN_WIDTHS_VALID = {
"Atemfrequenz": 1,
"Herzfrequenz": 2,
"Temperatur": 0.1,
}
def tic():
return time.time()
def toc(label, start):
diff = time.time()-start
print(label + (": %.3f" % diff))
def check_dir(path):
if not path.is_dir():
msg = "Requested folder does not exist: %s"
raise FileNotFoundError(msg % path)
def ensure_dir(path, exist_ok=True):
path = Path(path)
if not path.is_dir():
path.mkdir(parents=True, exist_ok=exist_ok)
return path.is_dir()
def apply_replacement_lookup(df):
print("Applying device replacements...")
def dt_to_str(dt):
return "--" if dt is None else dt.strftime("%m.%d.%y %H:%M")
for id_alias, replace_data in DEVICE_REPLACEMENT_LOOKUP.items():
id_true, repl_start, repl_stop = replace_data
repl_start = pd.to_datetime(repl_start)
repl_stop = pd.to_datetime(repl_stop)
mask = ((df["Bettenstellplatz"]==id_alias) &
((repl_start is None) or df["Timestamp"]>=repl_start) &
((repl_stop is None) or df["Timestamp"]<=repl_stop))
df.loc[mask, "Bettenstellplatz"] = id_true
print("%-6s => %-6s: %6d affected values in time range (%s, %s)"
% (id_alias, id_true, mask.sum(),
dt_to_str(repl_start), dt_to_str(repl_stop)))
print()
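# Hedged worked example (not part of the original pipeline): what the replacement
# lookup does to a toy frame. The bed id and timestamps are illustrative; note that
# apply_replacement_lookup() relies on Python's short-circuiting `or`, so a None
# bound simply disables that side of the time-range check.
def _example_apply_replacement():
    toy = pd.DataFrame({
        "Bettenstellplatz": ["2653F", "2653F"],
        "Timestamp": pd.to_datetime(["2021-05-13 08:00:00+02:00",
                                     "2021-05-15 08:00:00+02:00"]),
    })
    apply_replacement_lookup(toy)
    # Only the second row falls after 2021-05-14 12:00, so only it becomes "2655F".
    return toy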
def read_validation_data(data_dir):
def no_whitespace(s):
return s.replace(" ", "")
def fix_time(s):
return s.replace(".", ":")
def form_timestamp(df, col_date, col_time):
timestamp = df[col_date] + " " + df[col_time]
timestamp = pd.to_datetime(timestamp, dayfirst=True)
timestamp = timestamp.dt.tz_localize("Europe/Zurich").copy()
timestamp[(df[col_date]=="") | (df[col_time]=="")] = None
return timestamp
def format_manual(df, timestamp, parameter):
df_ret = df[["Bettenstellplatz", parameter,
"Bemerkungen", "Abweichung_Trageort"]].copy()
df_ret = df_ret.rename({parameter: "Wert"}, axis=1)
icol = df_ret.columns.get_loc("Wert")
df_ret.insert(loc=icol, column="Vitalparameter", value=parameter)
df_ret.insert(loc=0, column="Timestamp", value=timestamp)
df_ret = df_ret[~df_ret["Wert"].isna()].copy()
return df_ret
def read_station_data(valid_dir):
file_path = valid_dir/"HF-AF_25052021.csv"
df = pd.read_csv(file_path,
converters={"Signatur": str.strip,
"Bettenstellplatz": str.strip})
df = df[df["Signatur"].isin(VALIDATION_DATA_SOURCES)]
timestamp = form_timestamp(df=df, col_date="Datum", col_time="Zeit")
df.insert(loc=0, column="Timestamp", value=timestamp)
df = df.drop(["Datum", "Zeit"], axis=1)
# Transform to long format.
df = df.melt(id_vars=["Timestamp", "Bettenstellplatz", "Signatur"],
value_vars=["Herzfrequenz", "Atemfrequenz", "Temperatur"],
var_name="Vitalparameter", value_name="Wert")
df = df[~df["Wert"].isna()].copy()
df["Bemerkungen"] = ""
df["Abweichung_Trageort"] = ""
return df
def read_manual_data(valid_dir):
file_path = valid_dir/"Validierung_Daten_manuell_Mai2021_alle.csv"
df = pd.read_csv(file_path,
converters={"Bettenstellplatz": no_whitespace,
"Zeit_AF": fix_time,
"Zeit_HF": fix_time,
"Bemerkungen": str.strip,
"Abweichung_Trageort": str.strip})
# Atemfrequenz
ts = form_timestamp(df=df, col_date="Datum", col_time="Zeit_AF")
df_a = format_manual(df=df, timestamp=ts, parameter="Atemfrequenz")
# Herzfrequenz
ts = form_timestamp(df=df, col_date="Datum", col_time="Zeit_HF")
df_h = format_manual(df=df, timestamp=ts, parameter="Herzfrequenz")
        # Temperatur (Zeit_Temp; use Zeit_HF if missing!)
ts = form_timestamp(df=df, col_date="Datum", col_time="Zeit_HF")
df_t = format_manual(df=df, timestamp=ts, parameter="Temperatur")
df = pd.concat((df_a, df_h, df_t), axis=0)
df["Signatur"] = "MANUELL"
return df
print("Reading Validation data...")
valid_dir = data_dir/"original"/"validation"
check_dir(valid_dir)
df_station = read_station_data(valid_dir=valid_dir)
df_manual = read_manual_data(valid_dir=valid_dir)
df_valid = pd.concat((df_station, df_manual), axis=0)
df_valid = df_valid.sort_values(["Bettenstellplatz", "Timestamp"])
return df_valid
def read_baslerband_data(data_dir, n_max=None):
def read_bb_file(path):
# Sample path:
# ../2021-05-25/2617_FL/basler_band_DB_B4_2C_E5_CC_45_activity_file.csv
bed_id = path.parent.name.replace("_", "")
if bed_id == "2668":
bed_id = "2668E"
device_id = path.stem
device_id = device_id.replace("basler_band_", "")
device_id = device_id.replace("_activity_file", "")
df = pd.read_csv(path, index_col=[0], parse_dates=[0], sep=";")
df.index.name = "Timestamp"
# Filter by quality as specified
df = df[df["wearing"]==4]
df = df[["resp_filtered", "hrm_filtered",]]
df = df.rename({"resp_filtered": "Atemfrequenz",
"hrm_filtered": "Herzfrequenz"}, axis=1)
df["Bettenstellplatz"] = bed_id
df["DeviceID"] = device_id
df["Signatur"] = "BASLER_BAND"
df = df.reset_index(drop=False)
return df
print("Reading Basler Band data...")
bb_dir = data_dir/"original"/"basler_band"
check_dir(bb_dir)
files = bb_dir.glob("**/basler_band*activity_file.csv")
files = sorted(files)
dfs = []
progress = create_progress_bar(size=len(files),
label="Processing...")
for i, path in enumerate(files):
        if n_max is not None and i >= n_max:
break
progress.update(i)
df = read_bb_file(path=path)
dfs.append(df)
progress.finish()
df = pd.concat(dfs, axis=0)
df = df.melt(id_vars=["Timestamp", "Bettenstellplatz", "Signatur", "DeviceID"],
value_vars=["Herzfrequenz", "Atemfrequenz"],
var_name="Vitalparameter", value_name="Wert")
apply_replacement_lookup(df)
df = df.sort_values(["Bettenstellplatz", "Timestamp"])
return df
def read_core_data(data_dir, n_max=None):
def read_core_file(path, columns):
# Sample path:
# ../2021-05-17/2617_FL/core_D6_BE_C5_06_B3_48_storage-cbta_d.csv
bed_id = path.parent.name.replace("_", "")
if bed_id == "2668":
bed_id = "2668E"
device_id = path.stem
device_id = device_id.replace("core_", "")
device_id = device_id.replace("_storage-cbta_d", "")
df = pd.read_csv(path, index_col=[0], parse_dates=[0], sep=";")
df.index.name = "Timestamp"
df = df.rename(columns.to_dict(), axis=1)
# Filter by quality as specified
df = df[df["quality (core only)"]==4]
df = df[["cbt [mC]",]]
df = df.rename({"cbt [mC]": "Temperatur"}, axis=1)
df["Temperatur"] /= 1000 # from °mC to °C
df["Bettenstellplatz"] = bed_id
df["DeviceID"] = device_id
df["Signatur"] = "CORE"
df = df.reset_index(drop=False)
return df
print("Reading Core data...")
core_dir = data_dir/"original"/"core"
check_dir(core_dir)
columns = pd.read_csv(core_dir/"0_storage-cbta_d_columns.csv",
skipinitialspace=True,
index_col=[0], header=None, squeeze=True)
columns.index = columns.index.astype(str)
files = core_dir.glob("**/core_*storage-cbta_d.csv")
files = sorted(files)
progress = create_progress_bar(size=len(files),
label="Processing...")
dfs = []
for i, path in enumerate(files):
        if n_max is not None and i >= n_max:
break
progress.update(i)
df = read_core_file(path=path, columns=columns)
dfs.append(df)
progress.finish()
df = | pd.concat(dfs, axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""Test the views for the scheduler pages."""
import json
import os
from django.conf import settings
from django.db import IntegrityError
import pandas as pd
from ontask import tests
from ontask.table import serializers
class TableTestSerializers(tests.OnTaskTestCase):
"""Test stat views."""
fixtures = ['simple_table']
filename = os.path.join(settings.ONTASK_FIXTURE_DIR, 'simple_table.sql')
user_email = '<EMAIL>'
user_pwd = '<PASSWORD>'
workflow_name = 'wflow1'
def test_serializer_view(self):
"""Test the view serialization."""
# Try to create a view with a name that already exists.
try:
views = serializers.ViewSerializer(
data=[{
"columns": [
{"name": "email"},
{"name": "one"},
{"name": "registered"},
{"name": "when"}],
"name": "simple view",
"description_text": "",
"formula": {
"not": False,
"rules": [],
"valid": True,
"condition": "AND"},
}],
many=True,
context={
'workflow': self.workflow,
'columns': self.workflow.columns.all()
},
)
self.assertTrue(views.is_valid())
views.save()
except IntegrityError as exc:
self.assertTrue('duplicate key value violates' in str(exc))
else:
raise Exception('Incorrect serializer operation.')
# Try to create a view with a different name
views = serializers.ViewSerializer(
data=[{
"columns": [
{"name": "email"},
{"name": "one"},
{"name": "registered"},
{"name": "when"}],
"name": "simple view 2",
"description_text": "",
"formula": {
"not": False,
"rules": [],
"valid": True,
"condition": "AND"},
}],
many=True,
context={
'workflow': self.workflow,
'columns': self.workflow.columns.all()
},
)
self.assertTrue(views.is_valid())
views.save()
self.assertEqual(self.workflow.views.count(), 2)
def test_serializer_pandas(self):
"""Test the data frame serialization."""
df = pd.DataFrame(
{
'key': ['k1', 'k2'],
't1': ['t1', 't2'],
'i2': [5, 6],
'f3': [7.0, 8.0],
'b4': [True, False],
'd5': [
'2018-10-11 21:12:04+10:30',
'2018-10-12 21:12:04+10:30'],
})
df_str = serializers.df_to_string(df)
new_df = serializers.DataFramePandasSerializer(
data={'data_frame': df_str},
many=False,
)
self.assertTrue(new_df.is_valid())
new_df = new_df.validated_data['data_frame']
self.assertTrue(df.equals(new_df))
def test_serializer_json(self):
"""Test the data frame serialization with a json object"""
df = pd.DataFrame(
{
'key': ['k1', 'k2'],
't1': ['t1', 't2'],
'i2': [5, 6],
'f3': [7.0, 8.0],
'b4': [True, False],
'd5': [
'2018-10-11 21:12:04+00:00',
'2018-10-12 21:12:04+00:00'],
})
df['d5'] = | pd.to_datetime(df['d5'], infer_datetime_format=True) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script saves bid and ask data for specified ETFs to files for each day
during market open hours.
It assumes the computer is at US East Coast Time.
@author: mark
"""
import os
import pandas as pd
import numpy as np
from itertools import product
import streamlit as st
from bokeh.plotting import figure
from bokeh.models.tools import HoverTool
from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet
from streamlit_metrics import metric_row
def display_method_to_choose_etfs(selected_method_choose_dates, all_etfs, etf_data, sl_obj):
"""
Generates various streamlit options for selecting which ETFs to display.
Parameters
----------
selected_method_choose_dates : list of str
Strings of the various methods of selecting ETFs.
all_etfs : list of str
List of all ETF tickers.
etf_data : pd.DataFrame
Dataframe containing bulk data about ETFs.
sl_obj : streamlit
        Streamlit object in which to place the elements.
Returns
-------
selected_etfs : list of str
List of str tickers chosen by users.
"""
selected_etfs = all_etfs
if 'By volume traded' in selected_method_choose_dates:
selection_data = etf_data['volume (shares/day)']
log_min = float(np.floor(np.log10(selection_data.min())))
log_max = float(np.ceil(np.log10(selection_data.max())))
min_vol, max_vol = sl_obj.slider('Average Volume (shares/day)',
min_value=float(log_min),
max_value=float(log_max),
value=(float(log_min), float(log_max)),
step=float(log_min - log_max) / 100,
format='10^%.1f'
)
selected = (selection_data >= 10**min_vol) & (selection_data <= 10**max_vol)
selected_etfs = list(set(selected_etfs) & set(selection_data[selected].index))
if 'By market cap' in selected_method_choose_dates:
selection_data = etf_data['net assets (million USD)']
log_min = float(np.floor(np.log10(selection_data.min())))
log_max = float(np.ceil(np.log10(selection_data.max())))
min_vol, max_vol = sl_obj.slider('Market Cap as of 2021-02-21 (million USD)',
min_value=float(log_min),
max_value=float(log_max),
value=(float(log_min), float(log_max)),
step=float(log_min - log_max) / 100,
format='10^%.1f'
)
selected = (selection_data >= 10**min_vol) & (selection_data <= 10**max_vol)
selected_etfs = list(set(selected_etfs) & set(selection_data[selected].index))
if 'Only ESG ETFs' in selected_method_choose_dates:
esg_etfs = etf_data[etf_data['esg'] == True].index
selected_etfs = list(set(selected_etfs) & set(esg_etfs))
if 'choose specific ETFs' in selected_method_choose_dates:
selected_etfs = sl_obj.multiselect('Which ETFs do you want to look at', list(selected_etfs), ['ESGV','VTI','BND', 'VCEB', 'VSGX'])
return selected_etfs
def get_averages(data, selected_dates, selected_etfs):
"""
Obtain average values of various ETFs across the trading day.
Parameters
----------
data : pd.DataFrame
data of various days and ETFs.
selected_dates : list of str
list of dates in format YYYY-MM-DD.
selected_etfs : list of str
list of ETF tickers.
Returns
-------
    pd.DataFrame
        DataFrame of average values for each ETF at various times during the trading day.
"""
potential_columns = product(selected_dates, selected_etfs)
actual_columns = [x for x in potential_columns if x in data.columns]
return data[actual_columns].T.groupby(level=['etf']).mean().T
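# Hedged illustration (not part of the original app): the (date, etf) column layout
# that get_averages() expects, with a column level named 'etf'. The dates, tickers
# and values below are made up.
def _example_get_averages():
    times = pd.date_range("2021-01-01 09:30", periods=3, freq="1min")
    columns = pd.MultiIndex.from_product(
        [["2021-02-01", "2021-02-02"], ["VTI", "BND"]], names=["date", "etf"])
    toy = pd.DataFrame(np.random.rand(3, 4) * 1e-4, index=times, columns=columns)
    # Averages the two days for each ETF; the result has one column per ticker.
    return get_averages(toy, ["2021-02-01", "2021-02-02"], ["VTI", "BND"])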
def add_trade_windows(p, t_new, t_old, ymax):
"""
Add trade windows to plot
Parameters
----------
p : Bokeh figure
Figure to add trading windows to.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
ymax : float
Maxs value to extend trading windows.
Returns
-------
None.
"""
source = ColumnDataSource(dict(x=[t_old[0]+0.5*(t_old[1]-t_old[0]),t_new[0]+0.5*(t_new[1]-t_new[0])],
y=[ymax-0.0002, ymax-0.0002 ],
w=[t_old[1]-t_old[0], t_new[1]-t_new[0]],
h =[2,2],
desc=['Old', 'New']))
if ymax > 2:
patch = {'h' : [ (0, ymax), (1, ymax) ],}
source.patch(patch)
boxes = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=0.1,
line_width=0)
boxes_select = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=.2,
line_width=0)
box_rend = p.add_glyph(source, boxes)
box_rend.hover_glyph = boxes_select
tooltips = [('trade window','@desc')]
p.add_tools(HoverTool(tooltips=tooltips, renderers=[box_rend]))
def format_plots(p, ymax=None):
"""
Format bokeh plots for quoted spreads across market times
Parameters
----------
p : Bokeh figure plot
Bokeh plot object to format
    ymax : float, optional
        Maximum y-axis value. The default is None.
Returns
-------
None
"""
if ymax is None:
num_formatter='0.00%'
else:
num_zeros = int(np.log10(1/ymax)-.4)
num_formatter = '0.'+''.join(['0' for x in range(num_zeros)])+'%'
p.yaxis.formatter = NumeralTickFormatter(format=num_formatter)
p.xaxis.formatter = DatetimeTickFormatter(hours='%H:%M')
p.xaxis.axis_label = 'Market Time'
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.toolbar.autohide = True
def make_multi_etf_plot(selected_etfs, selected_dates, t_new, t_old, quoted_spread):
"""
Make plot with multiple ETF averages
Parameters
----------
selected_etfs : list of str
List of ETF tickers
selected_dates : list of str
List of dates to obtain averages of. In format YYYY-MM-DD.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
quoted_spread : pd.DataFrame
Quoted spread data for various times, days, and ETFs.
Returns
-------
p : Bokeh figure
Plot of multiple ETF averages.
"""
t_all = t_new + t_old
average_data = get_averages(quoted_spread, selected_dates, selected_etfs)
p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
toolbar_location='below', title='quoted Bid-Ask Spread for various ETFs',
x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
y_range=(0, average_data.max().max()+0.0001))
#trading windows
add_trade_windows(p, t_new, t_old, average_data.max().max())
# etf lines
renders = []
for etf in selected_etfs:
renders.append(p.line(average_data.index, average_data[etf],# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=1,
# set visual properties for non-selected glyphs
color="grey",
alpha=0.5,
name=etf))
tooltips = [('etf','$name'),
('time','$x{%H:%M}'),
('Bid-Ask spread', '$y{"0.00%"}')]
formatters = { "$x": "datetime",}
p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
format_plots(p, ymax=average_data.max().max()+0.0001)
return p
def make_single_etf_plot(selected_etf, selected_dates, t_new, t_old, quoted_spread, supress_hover_after= 10000):
"""
Plots data for a single ETF for multiple days.
Parameters
----------
selected_etfs : list of str
List of ETF tickers
selected_dates : list of str
List of dates to plot. In format YYYY-MM-DD.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
quoted_spread : pd.DataFrame
Quoted spread data for various times, days, and ETFs.
supress_hover_after : int, optional
Do not show hover functionality if there are more than this number of days. The default is 10000.
Returns
-------
p : Bokeh figure
Plot of single ETF over various days.
"""
t_all = t_new + t_old
average_data = get_averages(quoted_spread, selected_dates, [selected_etf])
p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
toolbar_location='below', title='Quoted spread for {}'.format(selected_etf),
x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
y_range=(0, average_data.max().max()+0.0001))
add_trade_windows(p, t_new, t_old, average_data.max().max())
# etf lines
renders = []
if len(selected_dates) > 1:
for date in selected_dates:
try:
render = p.line(quoted_spread.index, quoted_spread.loc[:,(date,selected_etf)],# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=0.33,
color="grey",
alpha=0.25,
name=date)
except KeyError:
continue
if len(selected_dates) < supress_hover_after:
renders.append(render)
average_name = 'average'
else:
average_name = selected_dates[0]
renders.append(p.line(average_data.index, average_data[selected_etf],# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=0.75,
color="black",
alpha=0.5,
name=average_name))
tooltips = [('date','$name'),
('time','$x{%H:%M}'),
('Bid-Ask spread', '$y{"0.00%"}')]
formatters = { "$x": "datetime",}
p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
format_plots(p)
return p
def make_bid_ask_plot(selected_etf, selected_date, t_new, t_old, directory):
"""
Plots bid and ask prices over one trading day for one ETF.
Parameters
----------
selected_etf : str
ETF ticker of data to show.
selected_date : str
Date of data to show. In format YYYY-MM-DD.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
directory : str
Folder containing ETF bid and ask price data. File must be in format date_etf.csv.
Returns
-------
p : Bokeh figure
Plot of bid and ask prices.
"""
data = pd.read_csv(os.path.join(directory, '{}_{}.csv'.format(selected_date, selected_etf)), index_col=0)
basetime = pd.to_datetime('2021-01-01') + pd.Timedelta(hours=9, minutes=30)
timedeltas = pd.TimedeltaIndex([pd.Timedelta(seconds=x) for x in data.index])
data.index = timedeltas + basetime
t_all = t_new + t_old
bid = data.bid
ask = data.ask
p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
toolbar_location='below', title='Bid & ask prices for {} on {}'.format(selected_etf, selected_date),
x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
y_range=(min(bid.min(),ask.min())-0.2, max(bid.max(),ask.max())+0.2))
add_trade_windows(p, t_new, t_old, max(bid.max(),ask.max()))
renders = []
renders.append(p.line(bid.index, bid.values,# set visual properties for selected glyphs
hover_color="blue",
hover_alpha=1,
color="blue",
alpha=.5,
name='bid'))
renders.append(p.line(ask.index, ask.values,# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=1,
color="firebrick",
alpha=0.5,
name='ask'))
tooltips = [('type','$name'),
('time','$x{%H:%M}'),
('price', '$y{"$0.00"}')]
formatters = { "$x": "datetime",}
p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
format_plots(p)
p.yaxis.formatter = NumeralTickFormatter(format="$0.00")
return p
def make_relative_fee_amount(selected_ratios, t_new_text = ''):
"""
Generate a bar plot for the ratio of quoted spread to expense ratio.
Parameters
----------
selected_ratios : pd.Series
Data of ratio of quoted spread to expense ratio.
t_new_text : str
Time range to place in title of plot.
Returns
-------
p : Bokeh figure
Produced plot.
"""
p = figure(plot_width=400, plot_height=400,
x_axis_label="ETFs", x_minor_ticks=len(selected_ratios),
toolbar_location='below', title='Ratio of quoted spread to expense ratio {}'.format(t_new_text))
source = ColumnDataSource(dict(x=range(len(selected_ratios)),
top=selected_ratios.values,
desc=selected_ratios.index,))
glyph = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='grey',
line_width=0, fill_alpha=0.5)
glyph_hover = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='firebrick',
line_width=0, fill_alpha=1)
rend = p.add_glyph(source, glyph)
rend.hover_glyph = glyph_hover
labels = LabelSet(x='x', level='glyph', source=source, render_mode='canvas')
tooltips = [('etf','@desc'),
('ratio','@top')]
p.add_tools(HoverTool(tooltips=tooltips, renderers=[rend]))
num_zeros = int(np.log10(1/selected_ratios.max())-.4)
num_formatter = '0.'+''.join(['0' for x in range(num_zeros)])+'%'
p.yaxis.formatter = NumeralTickFormatter(format=num_formatter)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.toolbar.autohide = True
p.xaxis.bounds = (-.5,len(selected_ratios)-.5)
p.xaxis.ticker = list(range(len(selected_ratios)))
p.xaxis.major_label_overrides = dict(zip(range(len(selected_ratios)), list(selected_ratios.index)))
p.xaxis.major_label_orientation = 3.14/2
return p
def get_quoted_spread_change(selected_etfs, selected_dates, t_old, t_new, quoted_spread):
"""
Get the relative change in average quoted spread between the two time windows.
Parameters
----------
selected_etfs : list of str
List of ETF tickers
selected_dates : list of str
List of dates to obtain averages of. In format YYYY-MM-DD.
t_new : tuple of timestamps
Starting and ending timestamp of the old trading window.
t_old : tuple of timestamps
Starting and ending timestamp of the new trading window.
quoted_spread : pd.DataFrame
Quoted spread data for various times, days, and ETFs.
Returns
-------
pd.Series
The relative change in average quoted spread between the two time windows.
"""
df = get_averages(quoted_spread, selected_dates, selected_etfs)
old_quotes = df[(df.index > t_old[0]) & (df.index < t_old[1])].mean(0)
new_quotes = df[(df.index > t_new[0]) & (df.index < t_new[1])].mean(0)
return (new_quotes / old_quotes).sort_values(ascending=False)
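# Hedged sketch (not part of the original app): how the window ratio is meant to be
# read. The window bounds below are illustrative; a ratio of 1.2 for an ETF means its
# average quoted spread is 20% higher in the new trading window than in the old one.
def _example_window_ratio(quoted_spread_df, dates, etfs):
    t_old = (pd.Timestamp("2021-01-01 10:00"), pd.Timestamp("2021-01-01 10:15"))
    t_new = (pd.Timestamp("2021-01-01 15:30"), pd.Timestamp("2021-01-01 15:45"))
    return get_quoted_spread_change(etfs, dates, t_old, t_new, quoted_spread_df)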
def create_metrics(fractional_increase, nwide=4, container=st, max_rows=2):
"""
Print information about fractional change in quoted spreads in metric form
Parameters
----------
fractional_increase : pd.Series
Data of the increase in fees between two windows.
nwide : int, optional
Number of metrics to print side-by-side. The default is 4.
container : streamlit object, optional
Object to display metrics. The default is st.
max_rows : int, optional
Max number of rows to present data for. The default is 2.
Returns
-------
None.
"""
metrics = {}
rows = 0
for etf, val in dict(fractional_increase).items():
if len(metrics) == nwide:
with container:
metric_row(metrics)
metrics = {}
rows += 1
if rows == max_rows:
break
metrics[etf] = '{:.0f}%'.format((val-1)*100)
if len(metrics) > 0:
with container:
metric_row(metrics)
st.write("# Bid-Ask spreads. Does time of day matter?")
st.write("#### By <NAME>")
st.write('first published March 10, 2021')
intro = st.beta_expander("Introduction")
data_selection = st.beta_expander("Data selection")
results = st.beta_expander("Results")
conclusion = st.beta_expander("Conclusion")
methods = st.beta_expander("Methods")
disclaimer = st.beta_expander("Disclaimer")
quoted_spread = pd.read_pickle('data/quoted_spread.pkl')
# remove outliers that impact average
del quoted_spread[('2020-12-16', 'SPCX')] # high value on second day of trading
del quoted_spread[('2020-03-12', 'ESGU')] # short high value on during large uncertainty
del quoted_spread[('2020-03-17', 'DRIV')] # short high value on during large uncertainty
del quoted_spread[('2020-02-03', 'EAGG')] # short high value on during large uncertainty
all_dates = list(quoted_spread.columns.levels[0])
all_dates.sort()
all_etfs = list(quoted_spread.columns.levels[1])
etf_data = pd.read_csv('etf.csv', index_col='Symbol')
etf_data = etf_data[etf_data['for_data'] == True]
start, end = data_selection.select_slider('Dates to analyze', all_dates, (all_dates[0], all_dates[-1]))
selected_dates = all_dates[all_dates.index(start):all_dates.index(end)]
method_choose_etfs = data_selection.multiselect('Methods for selecting ETFs',
['By volume traded', 'By market cap', 'Only ESG ETFs', 'choose specific ETFs'], ['choose specific ETFs'])
selected_etfs = display_method_to_choose_etfs(method_choose_etfs, all_etfs,etf_data,sl_obj=data_selection)
left_column, right_column = data_selection.beta_columns(2)
t_old = right_column.slider('Old trading window timing',
min_value=pd.Timestamp('2021-01-01 9:30').to_pydatetime(),
max_value=pd.Timestamp('2021-01-01 16:00').to_pydatetime(),
value=(pd.Timestamp('2021-01-01 10:00').to_pydatetime(), pd.Timestamp('2021-01-01 10:15').to_pydatetime()),
step= | pd.Timedelta(minutes=5) | pandas.Timedelta |
# Train fastText model
import argparse
import csv
import multiprocessing
import os
import sys
import time
csv.field_size_limit(sys.maxsize)
import fasttext
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc, average_precision_score
VERBOSITY = 2
WORDNGRAMS = 1
MINN = 0
MAXN = 0
MAXTHREADS = min(16, multiprocessing.cpu_count() - 1)
LOSS = 'ova'
def ft_to_toplevel(fasttext_lbl):
return fasttext_lbl.replace('__label__','').split('.')[0]
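# Hedged illustration (label format inferred from the '__label__' handling above):
# ft_to_toplevel('__label__STEM.Biology') would return 'STEM'.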
def grid_search(train_fn, val_fn, learning_rates, minCounts, epochs, ws, wvs, ndims):
best_lr = None
best_ndim = None
best_minCount = None
best_epochs = None
best_ws = None
best_wv = None
highest_f1 = float("-inf")
label_counts_val = {}
with open(val_fn) as fin:
for line in fin:
lbls = [l for l in line.strip().split(" ") if l.startswith('__label__')]
for lbl in lbls:
label_counts_val[lbl] = label_counts_val.get(lbl, 0) + 1
label_counts_train = {}
with open(train_fn) as fin:
for line in fin:
lbls = [l for l in line.strip().split(" ") if l.startswith('__label__')]
for lbl in lbls:
label_counts_train[lbl] = label_counts_train.get(lbl, 0) + 1
grid_search_results = []
wv = wvs
for lr in learning_rates:
for minCount in minCounts:
for epoch in epochs:
for w in ws:
for i in range(0, len(ndims)):
ndim = ndims[i]
print("Building fasttext model: {0} lr; {1} dim; {2} min count; {3} epochs. {4} ws. wv: {5}.".format(
lr, ndim, minCount, epoch, w, wv))
# train model
model = fasttext.train_supervised(input=train_fn,
minCount=minCount,
wordNgrams=WORDNGRAMS,
pretrainedVectors=wv,
lr=lr,
epoch=epoch,
dim=ndim,
ws=w,
minn=MINN,
maxn=MAXN,
thread=MAXTHREADS,
loss=LOSS,
verbose=VERBOSITY)
# val
results_by_lbl = model.test_label(val_fn, threshold=0.5, k=-1)
f1_scores, support = zip(*[(res['f1score'], label_counts_val[lbl]) for lbl, res in results_by_lbl.items() if lbl in label_counts_val])
macro_f1 = np.average(f1_scores)
micro_f1 = np.average(f1_scores, weights=support)
f1_avg = np.average([micro_f1, macro_f1])
if f1_avg > highest_f1:
best_lr = lr
best_ndim = ndim
best_minCount = minCount
best_epochs = epoch
best_ws = w
best_wv = wv
highest_f1 = f1_avg
# train (check overfitting)
results_by_lbl = model.test_label(train_fn, threshold=0.5, k=-1)
f1_scores, support = zip(
*[(res['f1score'], label_counts_train[lbl]) for lbl, res in results_by_lbl.items() if
lbl in label_counts_train])
tr_macro_f1 = np.average(f1_scores)
tr_micro_f1 = np.average(f1_scores, weights=support)
print("{0:.3f} micro f1. {1:.3f} macro f1. {2:.3f} train micro f1. {3:.3f} train macro f1".format(
micro_f1, macro_f1, tr_micro_f1, tr_macro_f1))
grid_search_results.append({'lr':lr, 'ndim':ndim, 'minCount':minCount, 'epoch':epoch, 'ws':w,
'val_micro_f1':micro_f1, 'val_macro_f1':macro_f1,
'tra_micro_f1':tr_micro_f1, 'tra_macro_f1':tr_macro_f1, 'wv':wv})
print("\n==== Grid Search Results====\n")
print(pd.DataFrame(grid_search_results)[
['lr','ndim','minCount','epoch','ws','val_micro_f1','tra_micro_f1', 'val_macro_f1', 'tra_macro_f1', 'wv']])
print("\nBest: {0} lr; {1} dim; {2} min count; {3} epochs; {4} ws; {5} wv\n".format(best_lr, best_ndim, best_minCount, best_epochs, best_ws, best_wv))
return best_lr, best_ndim, best_minCount, best_epochs, best_ws, best_wv
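# Hedged call sketch (not part of the original script): a small grid search over the
# learning rate and epochs with everything else fixed. The file paths are placeholders,
# and passing '' for the pretrained word vectors mirrors the argparse default below.
def example_grid_search(train_fn="train.txt", val_fn="val.txt"):
    return grid_search(train_fn, val_fn,
                       learning_rates=[0.05, 0.1],
                       minCounts=[3],
                       epochs=[10, 25],
                       ws=[20],
                       wvs='',
                       ndims=[50])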
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--training_data",
default="/home/isaacj/fastText/drafttopic/wikitext/enwiki.balanced_article_sample.w_article_text_50413_train_data.txt")
parser.add_argument("--val_data",
default="/home/isaacj/fastText/drafttopic/wikitext/enwiki.balanced_article_sample.w_article_text_6301_val_data.txt")
parser.add_argument("--test_data",
default="/home/isaacj/fastText/drafttopic/wikitext/enwiki.balanced_article_sample.w_article_text_6303_test_data.txt")
parser.add_argument("--false_negatives_fn")
parser.add_argument("--output_model")
parser.add_argument("--word_vectors", default='')
parser.add_argument("--learning_rates", nargs="+", default=[0.1], type=float)
parser.add_argument("--minCounts", nargs="+", default=[3], type=int)
parser.add_argument("--epochs", nargs="+", default=[25], type=int)
parser.add_argument("--ws", nargs="+", default=[20], type=int)
parser.add_argument("--ndims", nargs="+", default=[50], type=int)
parser.add_argument("--mlc_res_tsv")
args = parser.parse_args()
if os.path.exists(args.output_model):
print("Loading model:", args.output_model)
model = fasttext.load_model(args.output_model)
else:
if args.val_data and len(args.learning_rates + args.minCounts + args.epochs + args.ws + args.ndims) > 5:
lr, ndim, minCount, epochs, ws, wv = grid_search(
args.training_data, args.val_data, args.learning_rates, args.minCounts, args.epochs, args.ws, args.word_vectors, args.ndims)
else:
lr = args.learning_rates[0]
minCount = args.minCounts[0]
epochs = args.epochs[0]
ws = args.ws[0]
wv = args.word_vectors
ndim = args.ndims[0]
print("Building fasttext model: {0} lr; {1} min count; {2} epochs; {3} ws; wv: {4}".format(
lr, minCount, epochs, ws, wv))
start = time.time()
model = fasttext.train_supervised(input=args.training_data,
minCount=minCount,
wordNgrams=WORDNGRAMS,
lr=lr,
epoch=epochs,
pretrainedVectors=wv,
ws=ws,
dim=ndim,
minn=MINN,
maxn=MAXN,
thread=MAXTHREADS,
loss=LOSS,
verbose=VERBOSITY)
print("{0} seconds elapsed in training.".format(time.time() - start))
if args.output_model:
print("Dumping fasttext model to {0}".format(args.output_model))
model.save_model(args.output_model)
if args.test_data:
# build statistics dataframe for printing
print("==== test statistics ====")
lbl_statistics = {}
toplevel_statistics = {}
threshold = 0.5
all_lbls = model.get_labels()
for lbl in all_lbls:
lbl_statistics[lbl] = {'n': 0, 'FP': 0, 'TP': 0, 'FN': 0, 'TN': 0, 'true':[], 'pred':[]}
toplevel_statistics[ft_to_toplevel(lbl)] = {'n': 0, 'FP': 0, 'TP': 0, 'FN': 0, 'TN': 0}
with open(args.test_data, 'r') as fin:
for line_no, datapoint in enumerate(fin):
_, topics = model.get_line(datapoint.strip())
prediction = model.predict(datapoint.strip(), k=-1)
predicted_labels = []
for idx in range(len(prediction[0])):
prob = prediction[1][idx]
lbl = prediction[0][idx]
lbl_statistics[lbl]['true'].append(int(lbl in topics))
lbl_statistics[lbl]['pred'].append(prob)
if prob > threshold:
predicted_labels.append(lbl)
for lbl in all_lbls:
if lbl in topics and lbl in predicted_labels:
lbl_statistics[lbl]['n'] += 1
lbl_statistics[lbl]['TP'] += 1
elif lbl in topics:
lbl_statistics[lbl]['n'] += 1
lbl_statistics[lbl]['FN'] += 1
elif lbl in predicted_labels:
lbl_statistics[lbl]['FP'] += 1
else:
lbl_statistics[lbl]['TN'] += 1
toplevel_topics = [ft_to_toplevel(l) for l in topics]
toplevel_predictions = [ft_to_toplevel(l) for l in predicted_labels]
for lbl in toplevel_statistics:
if lbl in toplevel_topics and lbl in toplevel_predictions:
toplevel_statistics[lbl]['n'] += 1
toplevel_statistics[lbl]['TP'] += 1
elif lbl in toplevel_topics:
toplevel_statistics[lbl]['n'] += 1
toplevel_statistics[lbl]['FN'] += 1
elif lbl in toplevel_predictions:
toplevel_statistics[lbl]['FP'] += 1
else:
toplevel_statistics[lbl]['TN'] += 1
for lbl in all_lbls:
s = lbl_statistics[lbl]
fpr, tpr, _ = roc_curve(s['true'], s['pred'])
            s['pr-auc'] = auc(fpr, tpr)  # note: computed from the ROC curve (fpr/tpr), so this is ROC AUC despite the key name
s['avg_pre'] = average_precision_score(s['true'], s['pred'])
try:
s['precision'] = s['TP'] / (s['TP'] + s['FP'])
except ZeroDivisionError:
s['precision'] = 0
try:
s['recall'] = s['TP'] / (s['TP'] + s['FN'])
except ZeroDivisionError:
s['recall'] = 0
try:
s['f1'] = 2 * (s['precision'] * s['recall']) / (s['precision'] + s['recall'])
except ZeroDivisionError:
s['f1'] = 0
for lbl in toplevel_statistics:
s = toplevel_statistics[lbl]
try:
s['precision'] = s['TP'] / (s['TP'] + s['FP'])
except ZeroDivisionError:
s['precision'] = 0
try:
s['recall'] = s['TP'] / (s['TP'] + s['FN'])
except ZeroDivisionError:
s['recall'] = 0
try:
s['f1'] = 2 * (s['precision'] * s['recall']) / (s['precision'] + s['recall'])
except ZeroDivisionError:
s['f1'] = 0
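        # Worked example with illustrative counts (not from this dataset): TP=40, FP=10, FN=20
        # gives precision = 40/50 = 0.80, recall = 40/60 ≈ 0.667, f1 ≈ 0.727.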
print("\n=== Mid Level Categories ===")
        mlc_statistics = pd.DataFrame(lbl_statistics)
from os import abort
from requests import get
from bs4 import BeautifulSoup
from pandas import read_html, concat, DataFrame, read_csv
from .utils import url_daerah, total_page, _baseurl_
def get_daerah() -> list:
page = get(_baseurl_)
data = []
soup = BeautifulSoup(page.text, 'lxml')
table = soup.find_all('td')
for i in table:
name = i.find('a').text.strip()
link = i.find('a')['href'].strip().split('/')[-2]
data.append({
"Nama" : name,
"Link": link
})
return data
def setup_provinsi():
list_url = url_daerah()
for i in list_url:
tpage = total_page('{}/{}'.format(_baseurl_, i))
data = []
for j in range(tpage+1):
url = 'https://carikodepos.com/daerah/{}/page/{}/'.format(i,j)
r = get(url)
res = read_html(r.text)
data.append(res)
tail1 = data[len(data)-1][0]
try:
tail2 = data[len(data)-1][1]
except:
tail2 = DataFrame()
for k in range(len(data[:-1])):
data1 = data[k][0].convert_dtypes()
data2 = data[k][1].convert_dtypes()
if k == 0:
df1 = concat([tail1, data1])
                df2 = concat([tail2, data2])
import os
import random
import sys
import pandas as pd
from ATM import welcome
from Validate import validateDetails2, validateLogin
filePath = r".\{}.csv".format("atm")
if not os.path.isfile(filePath) or os.path.getsize(filePath) == 0:
df = pd.DataFrame({"firstName": [], "lastName": [], "email": [], "address": [], "accountNumber": [],
"password": [], "contact": []})
df.to_csv(filePath, index=False)
def generateAccountNumber():
accountNumber = random.randrange(1000000000, 9999999999)
boolAccount = validateDetails2(accountNumber=accountNumber) # Checks if account number has been used or not.
    if not boolAccount:  # Recreate the account number until an unused one is generated
        return generateAccountNumber()
print("Account number Validated!")
return accountNumber # Returns generated un-used account number.
def register():
# To register, we need Name, Email, Account number, Password, Address, contact
print("\n" + "-" * 15 + "REGISTRATION" + "-" * 15)
welcomePrompt = "\nTake a few minutes to register an account with us.\n" \
"Please fill the following details as accurately as possible\n"
print(welcomePrompt)
firstName = input("Enter your First name: ")
lastName = input("Enter your Last name: ")
email = input("Enter your Email Address: ")
address = input("Enter your Home Address: ")
while True: # Get contact until up to 11 digits is entered
contact = input("Enter your Phone number: ")
if len(contact) == 11:
break
else:
print("Incorrect Phone number, try again.")
boolValue = validateDetails2(contact=contact, email=email) # Check if details exist
if boolValue:
print("\nGenerating Account Number...")
accountNumber = str(generateAccountNumber())
print("Your Account Number is", accountNumber)
while True:
password = input("Enter password (must be 8 digits or more): ")
if len(password) >= 8:
break
print("\nPlease take note of your account number and password.\n")
registration = {"firstName": firstName, "lastName": lastName, "email": email, "address": address,
"accountNumber": accountNumber, "password": password, "contact": contact, "Balance": 0}
data = pd.read_csv(filePath, dtype=str)
data = data.append(pd.DataFrame([registration]))
data.to_csv(filePath, index=False)
print("Registration Successful.\nRedirecting you to Login.")
else:
print("\nUser already exists, try login.")
return login()
def login():
print("\n" + "-" * 15 + "LOGIN" + "-" * 15)
print("\nPlease fill in the following details.\n")
accountNumber = input("Enter your account number: ")
password = input("Enter your password: ")
boolValue, userPosition = validateLogin(accountNumber=accountNumber, password=password)
if boolValue:
welcome(userPosition)
else:
retrieveAccount = input("Retrieve account number (yes/no): ")
if retrieveAccount == 'yes':
suggestion = retrieveAccountNumber()
if suggestion:
login()
else:
register()
return main()
def retrieveAccountNumber():
getEmail = input("Enter your email: ")
getContact = input("Enter your Phone number: ")
    data = pd.read_csv(filePath, dtype=str)
import os
from itertools import product
import altair as alt
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from bayes_window import BayesRegression, LMERegression, BayesConditions
from bayes_window import models
from bayes_window import utils
from bayes_window.fitting import fit_numpyro
from bayes_window.generative_models import generate_fake_lfp
from jax import random
from joblib import Parallel, delayed
from numpyro.infer import Predictive
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
trans = LabelEncoder().fit_transform
def make_confusion_matrix(res, groups):
df = []
for _, this_res in res.groupby(list(groups)):
try:
this_res['score'] = this_res['score'].replace({'': None}).astype(float)
except TypeError:
pass
this_res['true_slope'] = this_res['true_slope'] > 0
this_res['score'] = this_res['score'] > 0
cm = confusion_matrix(this_res['true_slope'],
this_res['score'],
labels=this_res['true_slope'].unique())
cm = [y for i in cm for y in i]
roll = list(product(np.unique(this_res['true_slope']), repeat=2))
this_res = this_res.drop(['true_slope', 'score'], axis=1)
for i in range(len(roll)):
rez = {'actual': roll[i][0],
'predicted': roll[i][1],
'Occurences': cm[i],
}
rez.update(this_res.iloc[0].to_dict())
df.append(rez)
# Remove raw scores to reduce confusion
return pd.DataFrame.from_records(df)
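# Illustrative example for make_confusion_matrix above (hypothetical inputs): for true=[0,0,1,1]
# and pred=[0,1,1,1], confusion_matrix(..., labels=[False, True]) yields [[1, 1], [0, 2]], which is
# flattened row-major to [1, 1, 0, 2] and paired with the (actual, predicted) combinations
# (False, False), (False, True), (True, False), (True, True).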
def plot_confusion(df):
# plot
base = alt.Chart(df)
heat = base.mark_rect().encode(
x="predicted",
y="actual",
color='Occurences:O'
).properties(width=180, height=180)
# Configure text
# Doesnt work without mean; mean is meaningless without groupby
# text = base.mark_text(baseline='middle').encode(
# text=alt.Text('mean(Occurences)', format=",.1f", ),
# x="predicted",
# y="actual",
# # color=alt.condition(
# # alt.datum.Occurences > df['Occurences'].mean(),
# # alt.value('black'),
# # alt.value('white')
# # )
# )
return heat
def make_roc_auc(res, binary=True, groups=('method', 'y', 'randomness', 'n_trials')):
""" Vary as function of true_slope """
df = []
for head, this_res in res.groupby(list(groups)):
try:
this_res['score'] = this_res['score'].replace({'': None}).astype(float)
except TypeError:
pass
if binary:
this_res['score'] = this_res['score'] > 0
else:
this_res = this_res.dropna(subset=['score']) # drop nans to prevent errors
fprs = []
tprs = []
slopes = []
# Loop over true_slopes
for ts in this_res[this_res['true_slope'] > 0]['true_slope'].unique():
# Select all no-slopes and this slope:
x = this_res[(this_res['true_slope'] == 0) |
(this_res['true_slope'] == ts)]
# Binarize:
x['true_slope'] = x['true_slope'] > 0
fpr, tpr, _ = roc_curve(x['true_slope'], x['score'])
# print(f"Yes for {head} {round(ts, 2)}: {fpr, tpr}")
# print(f"{x['true_slope'].values},\n {x['score'].values}")
fprs.extend(fpr)
tprs.extend(tpr)
slopes.extend(np.repeat(ts, len(fpr)))
rocs = {'False positive rate': fprs,
'True positive rate': tprs,
                'AUC': round(auc(fpr, tpr), 5),  # note: uses fpr/tpr from the last true_slope iteration only
'true_slope': slopes
}
        rocs = pd.DataFrame(rocs)
import requests
import pandas as pd
import pickle
import datetime
import guithread
import numpy as np
import concurrent.futures
import time
from os import makedirs
from config import text_width, max_thread_count
class Acquisition(guithread.GUIThread):
def __init__(self, filename='default.csv', brain_region='All', species='All', cell_type='All'):
self.filename = filename
self.brain_region, self.species, self.cell_type = brain_region, species, cell_type
self.session = requests.Session()
self.params_widg = {}
if brain_region != 'All':
self.params_widg['brain_region'] = 'brain_region:' + brain_region
if species != 'All':
self.params_widg['species'] = 'species:' + species
if cell_type != 'All':
self.params_widg['cell_type'] = 'cell_type:' + cell_type
self.params = {}
self.params['page'] = 0
self.params['size'] = 500
fq = []
first = 0
for key, value in self.params_widg.items():
if first == 0:
first = 1
self.params['q'] = value
else:
fq.append(value)
self.params['fq'] = fq
if brain_region == 'All' and species == 'All' and cell_type == 'All':
self.url = 'http://neuromorpho.org/api/neuron'
else:
self.url = 'http://neuromorpho.org/api/neuron/select'
super().__init__()
def get_first_page(self):
brain_region, species, cell_type = self.brain_region, self.species, self.cell_type
s = self.session
first_page_response = s.get(self.url, params=self.params)
print(first_page_response.json())
if first_page_response.status_code == 404 or first_page_response.status_code == 500:
self.print_to_textbox("Unable to get CSV! Status code: " + str(first_page_response.status_code))
return 0
elif 'status' in first_page_response.json() and first_page_response.json()['status'] == 500:
self.print_to_textbox("Unable to get CSV! Status code: " + str(first_page_response.json()['status']))
return 0
print(str(first_page_response.request.url))
print(first_page_response.status_code)
return first_page_response.json()['page']['totalPages'], first_page_response.json()['page']['totalElements']
def get_morphometry(self, np_array):
morphometry = []
for i in np_array:
url = "http://neuromorpho.org/api/morphometry/id/" + str(i)
response = self.session.get(url)
response.raise_for_status()
json_data = response.json()
morphometry.append(json_data)
if response.status_code == 200:
text_status_code = '\u2705'
else:
text_status_code = '\u274C'
self.print_to_textbox('Querying cells {} -> status code: {} {}'.format(
str(i), response.status_code, text_status_code)
)
return morphometry
def run(self):
file_name = ""
try:
brain_region, species, cell_type = self.brain_region, self.species, self.cell_type
s = self.session
starttime = datetime.datetime.now()
self.set_progress(0)
self.print_to_textbox(brain_region + '\n' + species + '\n' + cell_type + '\n')
totalPages, totalElements = self.get_first_page()
self.print_to_textbox("Getting Neurons - total elements:" + str(totalElements) +
"\nDo you want to continue?")
timer = 10
while self.is_paused and not self.is_killed:
time.sleep(1)
timer -= 1
self.print_to_textbox("Will continue in: " + str(timer) + " seconds")
if timer == 0:
break
if self.is_killed:
self.print_to_textbox("CANCELLED!!!")
self.print_to_textbox("\n" + "#" * text_width + "\n")
self.set_progress(0)
return 0
self.print_to_textbox("Continuing...")
df_dict = {
'NeuronID': list(),
'Neuron Name': list(),
'Archive': list(),
'Note': list(),
'Age Scale': list(),
'Gender': list(),
'Age Classification': list(),
'Brain Region': list(),
'Cell Type': list(),
'Species': list(),
'Strain': list(),
'Scientific Name': list(),
'Stain': list(),
'Experiment Condition': list(),
'Protocol': list(),
'Slicing Direction': list(),
'Reconstruction Software': list(),
'Objective Type': list(),
'Original Format': list(),
'Domain': list(),
'Attributes': list(),
'Magnification': list(),
'Upload Date': list(),
'Deposition Date': list(),
'Shrinkage Reported': list(),
'Shrinkage Corrected': list(),
'Reported Value': list(),
'Reported XY': list(),
'Reported Z': list(),
'Corrected Value': list(),
'Corrected XY': list(),
'Corrected Z': list(),
'Slicing Thickness': list(),
'Min Age': list(),
'Max Age': list(),
'Min Weight': list(),
'Max Weight': list(),
'Png URL': list(),
'Reference PMID': list(),
'Reference DOI': list(),
'Physical Integrity': list()}
self.print_to_textbox("Getting Neurons - total pages:" + str(totalPages))
progress_step = 20.0/totalPages
for pageNum in range(totalPages):
self.params['page'] = pageNum
response = s.get(self.url, params=self.params)
if response.status_code == 200:
text_status_code = '\u2705'
else:
text_status_code = '\u274C'
self.print_to_textbox('Querying page {} -> status code: {} {}'.format(
pageNum, response.status_code, text_status_code))
if response.status_code == 200: # only parse successful requests
data = response.json()
for row in data['_embedded']['neuronResources']:
df_dict['NeuronID'].append(str(row['neuron_id']))
df_dict['Neuron Name'].append(str(row['neuron_name']))
df_dict['Archive'].append(str(row['archive']))
df_dict['Note'].append(str(row['note']))
df_dict['Age Scale'].append(str(row['age_scale']))
df_dict['Gender'].append(str(row['gender']))
df_dict['Age Classification'].append(str(row['age_classification']))
df_dict['Brain Region'].append(str(row['brain_region']))
df_dict['Cell Type'].append(str(row['cell_type']))
df_dict['Species'].append(str(row['species']))
df_dict['Strain'].append(str(row['strain']))
df_dict['Scientific Name'].append(str(row['scientific_name']))
df_dict['Stain'].append(str(row['stain']))
df_dict['Experiment Condition'].append(str(row['experiment_condition']))
df_dict['Protocol'].append(str(row['protocol']))
df_dict['Slicing Direction'].append(str(row['slicing_direction']))
df_dict['Reconstruction Software'].append(str(row['reconstruction_software']))
df_dict['Objective Type'].append(str(row['objective_type']))
df_dict['Original Format'].append(str(row['original_format']))
df_dict['Domain'].append(str(row['domain']))
df_dict['Attributes'].append(str(row['attributes']))
df_dict['Magnification'].append(str(row['magnification']))
df_dict['Upload Date'].append(str(row['upload_date']))
df_dict['Deposition Date'].append(str(row['deposition_date']))
df_dict['Shrinkage Reported'].append(str(row['shrinkage_reported']))
df_dict['Shrinkage Corrected'].append(str(row['shrinkage_corrected']))
df_dict['Reported Value'].append(str(row['reported_value']))
df_dict['Reported XY'].append(str(row['reported_xy']))
df_dict['Reported Z'].append(str(row['reported_z']))
df_dict['Corrected Value'].append(str(row['corrected_value']))
df_dict['Corrected XY'].append(str(row['corrected_xy']))
df_dict['Corrected Z'].append(str(row['corrected_z']))
df_dict['Slicing Thickness'].append(str(row['slicing_thickness']))
df_dict['Min Age'].append(str(row['min_age']))
df_dict['Max Age'].append(str(row['max_age']))
df_dict['Min Weight'].append(str(row['min_weight']))
df_dict['Max Weight'].append(str(row['max_weight']))
df_dict['Png URL'].append(str(row['png_url']))
df_dict['Reference PMID'].append(str(row['reference_pmid']))
df_dict['Reference DOI'].append(str(row['reference_doi']))
df_dict['Physical Integrity'].append(str(row['physical_Integrity']))
self.set_progress(pageNum * progress_step)
self.set_progress(20)
self.print_to_textbox("Creating neuron Data Frame")
neurons_df = pd.DataFrame(df_dict)
self.set_progress(25)
self.print_to_textbox("Pickling neurons")
makedirs("./output", exist_ok=True)
neurons_df.to_pickle("./output/neurons.pkl")
self.set_progress(30)
# the ID number of previously obtained neurons is used to obtain their morphometric details
np_array = neurons_df['NeuronID'].to_numpy()
np_arrays = np.array_split(np_array, max_thread_count)
self.print_to_textbox("Getting morphometry")
morphometry = []
progress_step = 40.0 / np_array.size
progress_value = 0.0
with concurrent.futures.ThreadPoolExecutor(max_workers=max_thread_count) as executor:
futures = []
for n in np_arrays:
futures.append(executor.submit(self.get_morphometry, np_array=n))
for future in concurrent.futures.as_completed(futures):
morphometry.extend(future.result())
print(morphometry)
self.set_progress(70)
self.print_to_textbox("Creating morphometry Data Frame")
df_dict = {}
df_dict['NeuronID'] = []
df_dict['Surface'] = []
df_dict['Volume'] = []
df_dict['Soma surface'] = []
df_dict['Number of stems'] = []
df_dict['Number of bifurcations'] = []
df_dict['Number of branches'] = []
df_dict['Width'] = []
df_dict['Height'] = []
df_dict['Depth'] = []
df_dict['Diameter'] = []
df_dict['Euclidian distance'] = []
df_dict['Path distance'] = []
df_dict['Branch order'] = []
df_dict['Contraction'] = []
df_dict['Fragmentation'] = []
df_dict['Partition asymmetry'] = []
df_dict['Pk classic'] = []
df_dict['Bifurcation angle local'] = []
df_dict['Fractal dimension'] = []
df_dict['Bifurcation angle remote'] = []
df_dict['Length'] = []
for row in morphometry:
df_dict['NeuronID'].append(str(row['neuron_id']))
df_dict['Surface'].append(str(row['surface']))
df_dict['Volume'].append(str(row['volume']))
df_dict['Soma surface'].append(str(row['soma_Surface']))
df_dict['Number of stems'].append(str(row['n_stems']))
df_dict['Number of bifurcations'].append(str(row['n_bifs']))
df_dict['Number of branches'].append(str(row['n_branch']))
df_dict['Width'].append(str(row['width']))
df_dict['Height'].append(str(row['height']))
df_dict['Depth'].append(str(row['depth']))
df_dict['Diameter'].append(str(row['diameter']))
df_dict['Euclidian distance'].append(str(row['eucDistance']))
df_dict['Path distance'].append(str(row['pathDistance']))
df_dict['Branch order'].append(str(row['branch_Order']))
df_dict['Contraction'].append(str(row['contraction']))
df_dict['Fragmentation'].append(str(row['fragmentation']))
df_dict['Partition asymmetry'].append(str(row['partition_asymmetry']))
df_dict['Pk classic'].append(str(row['pk_classic']))
df_dict['Bifurcation angle local'].append(str(row['bif_ampl_local']))
df_dict['Fractal dimension'].append(str(row['fractal_Dim']))
df_dict['Bifurcation angle remote'].append(str(row['bif_ampl_remote']))
df_dict['Length'].append(str(row['length']))
morphometry_df = pd.DataFrame(df_dict)
self.set_progress(75)
self.print_to_textbox("Pickling morphometry")
morphometry_df.to_pickle("./output/morphometry.pkl")
# the following is a list of steps used to currate the morphometric data
# and merge the two obtained dataframes (general neuron parameters and morphometric data)
# this results in the creation of final .pkl and .csv files at the end of the notebook
neurons = open("./output/morphometry.pkl", "rb")
neurons_df = pickle.load(neurons)
neurons.close()
self.set_progress(80)
self.print_to_textbox(neurons_df)
neurons_df = neurons_df.replace({'Soma surface': {'None': ''}}, regex=True)
neurons_df["Surface"] = pd.to_numeric(neurons_df["Surface"], downcast="float")
neurons_df["Volume"] = pd.to_numeric(neurons_df["Volume"], downcast="float")
neurons_df["Soma surface"] = pd.to_numeric(neurons_df["Soma surface"], downcast="float")
neurons_df["Number of stems"] = pd.to_numeric(neurons_df["Number of stems"], downcast="float")
neurons_df["Number of bifurcations"] = pd.to_numeric(neurons_df["Number of bifurcations"], downcast="float")
neurons_df["Number of branches"] = pd.to_numeric(neurons_df["Number of branches"], downcast="float")
neurons_df["Width"] = pd.to_numeric(neurons_df["Width"], downcast="float")
neurons_df["Height"] = pd.to_numeric(neurons_df["Height"], downcast="float")
neurons_df["Depth"] = pd.to_numeric(neurons_df["Depth"], downcast="float")
neurons_df["Diameter"] = pd.to_numeric(neurons_df["Diameter"], downcast="float")
neurons_df["Euclidian distance"] = pd.to_numeric(neurons_df["Euclidian distance"], downcast="float")
neurons_df["Path distance"] = pd.to_numeric(neurons_df["Path distance"], downcast="float")
neurons_df["Branch order"] = pd.to_numeric(neurons_df["Branch order"], downcast="float")
neurons_df["Contraction"] = pd.to_numeric(neurons_df["Contraction"], downcast="float")
neurons_df["Fragmentation"] = pd.to_numeric(neurons_df["Fragmentation"], downcast="float")
neurons_df["Partition asymmetry"] = pd.to_numeric(neurons_df["Partition asymmetry"], downcast="float")
neurons_df["Pk classic"] = pd.to_numeric(neurons_df["Pk classic"], downcast="float")
neurons_df["Bifurcation angle local"] = pd.to_numeric(neurons_df["Bifurcation angle local"], downcast="float")
neurons_df["Fractal dimension"] = pd.to_numeric(neurons_df["Fractal dimension"], downcast="float")
neurons_df["Number of branches"] = pd.to_numeric(neurons_df["Number of branches"], downcast="float")
neurons_df["Bifurcation angle remote"] = | pd.to_numeric(neurons_df["Bifurcation angle remote"], downcast="float") | pandas.to_numeric |
# Search the TSX web site to get a list of all listed companies
import os
import sys
import getopt
import datetime
# from numpy.lib.function_base import append
import pandas as pd
import sqlalchemy
import logging
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import pandas_datareader.data as web
from dotenv import load_dotenv
from sqlalchemy import engine
load_dotenv()
class TSX_Browser():
def __init__(self, chrome_driver_path=None):
self.url_ticker= "https://www.tsx.com/listings/listing-with-us/listed-company-directory"
self.__OPTIONS = webdriver.ChromeOptions()
self.__OPTIONS.add_experimental_option('excludeSwitches', ['enable-logging'])
if chrome_driver_path is None:
            self.__CHROME_DRIVER_LOCATION = r"C:\Program Files (x86)\chromedriver.exe"
else:
self.__CHROME_DRIVER_LOCATION = chrome_driver_path
self.driver = None
#self.driver = webdriver.Chrome(executable_path=self.__CHROME_DRIVER_LOCATION,options=self.__OPTIONS)
self.seconds_to_wait = 10
def close(self):
try:
self.driver.quit()
except Exception as e:
logging.info(f"Unable to quit() Chrome Driver, Error: {e}")
def set_exchange(self, exchange) -> bool:
# Make sure we have a driver available
if self.driver is None:
try:
self.driver = webdriver.Chrome(executable_path=self.__CHROME_DRIVER_LOCATION,options=self.__OPTIONS)
except Exception as e:
logging.critical(f"Unable to create Chrome Browser Driver, Error: {e}")
raise
return False
self.driver.get(self.url_ticker)
self.driver.minimize_window()
tsx_css_selectors = {"tsx":".tsx.on", "tsxv":".tsxv.on"}
if exchange.lower() in tsx_css_selectors.keys():
tsx_flag = tsx_css_selectors.get(exchange)
else:
# Use tsx by default if bad exchange passed
tsx_flag = tsx_css_selectors.get('tsx')
# Check if TSX Browser is already on desired exchange page
try:
WebDriverWait(self.driver, self.seconds_to_wait).until( EC.visibility_of_element_located((By.CSS_SELECTOR, tsx_flag)))
return True
except:
# Switch Exchange using TSX Browser button
switch_btn_xpath = '//*[@id="exchange-toggle"]'
switch_btn = self.driver.find_element_by_xpath(switch_btn_xpath)
switch_btn.click()
# Make sure the switch worked correctly or fail
try:
WebDriverWait(self.driver, self.seconds_to_wait).until( EC.visibility_of_element_located((By.CSS_SELECTOR, tsx_flag)))
return True
except Exception as e:
logging.warning(f"Unable to switch TSX Echange page, Error: {e}")
raise
return False
class TSX_Company_Info():
def __init__(self, name, ticker, exchange='tsx'):
self.name = name.upper().strip()
self.ticker = ticker.upper().strip()
self.exchange = exchange
self.url = f"https://money.tmx.com/en/quote/{self.ticker}"
self.yahoo = self.create_yahoo_ticker(self.ticker, self.exchange)
@property
def dict(self) -> dict:
data ={}
data["ticker"] = self.ticker
data["name"] = self.name
data["exchange"] = self.exchange
data["url"] = self.url
data["yahoo"] = self.yahoo
return data
def create_yahoo_ticker(self, ticker, exchange):
yahoo_ticker = ticker.replace('.','-')
        # TODO: fix the double "-" not found on yahoo exchange
yahoo_extension = "TO" if exchange == "tsx" else "V"
yahoo_ticker = f"{yahoo_ticker}.{yahoo_extension}"
return yahoo_ticker
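    # Illustrative mapping for create_yahoo_ticker (assumed tickers, not fetched from TSX):
    # ("BAM.A", "tsx") -> "BAM-A.TO" and ("XYZ", "tsxv") -> "XYZ.V"; dots become dashes and an
    # exchange suffix (.TO or .V) is appended.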
class TSX:
def __init__(self):
self.chrome_browser = None
def dispose(self):
if self.chrome_browser is not None:
self.chrome_browser.close()
# Return a DataFrame with a list of TSX Company Info
def extract_tickers_for_str(self, search_str, exchange="tsx", progress=False) -> pd.DataFrame:
companies = []
if self.chrome_browser is None:
try:
chrome_browser = TSX_Browser()
self.chrome_browser = chrome_browser
except Exception as e:
logging.critical(f"Unable to create Chrome Browser, Error: {e}")
return None
# Make sur the TSX Page is on the desired stock exchange
try:
self.chrome_browser.set_exchange(exchange)
except Exception as e:
logging.warning(f"Unable to set TSX Exchange Page to: {exchange}, Error: {e}")
return None
if progress:
print(f"\n\n Extracting data for : {search_str}")
# Send letter into the search box of the page
search_box_xpath = '//*[@id="query"]'
search_box = self.chrome_browser.driver.find_element_by_xpath(search_box_xpath)
search_box.send_keys(search_str)
# Click the search button to send search request
search_btn_xpath = '//*[@id="btn-search-listed-company-directory"]'
search_btn = self.chrome_browser.driver.find_element_by_xpath(search_btn_xpath)
search_btn.click()
try:
# By.ID, By.xpath, BY.cssSelector
result_data_xpath = '//*[@id="tresults"]/tbody'
WebDriverWait(self.chrome_browser.driver, 10).until( EC.visibility_of_element_located((By.XPATH, result_data_xpath)))
datagrid = self.chrome_browser.driver.find_element_by_xpath(result_data_xpath)
#progress_msg(f"Received response for letter {search_str} ")
except TimeoutException:
return None
data = datagrid.find_elements_by_tag_name("tr")
# progress_msg(f"Extracting table data for letter {search_str} ")
companies = []
for row in data:
cells = row.find_elements_by_tag_name("td")
# Skip the line with no ticker symbol
if (cells[1].text != ""):
ticker = cells[1].text
name = cells[0].text
company = TSX_Company_Info(name, ticker, exchange=exchange)
companies.append(company.dict)
if progress:
print(f"-- Extracted : {exchange}, {ticker.ljust(10)}, {name.strip()} ")
df_companies = pd.DataFrame(companies)
df_companies.set_index('ticker')
return df_companies
def save_tickers_in_DB(self, DB, df_tickers, overwrite=False) -> bool:
try:
engine = sqlalchemy.create_engine(f"sqlite:///{DB}")
except Exception as e:
logging.critical(f"Unable to create DB Engine, Error: {e}")
raise
return False
save_type = "append" if not overwrite else "replace"
try:
df_tickers.to_sql('tsx_symbols', engine, if_exists=save_type)
saved = True
except Exception as e:
logging.warning(f"Unable to save DataFrame ({df_tickers}) using .to_sql() probably empty DataFrame, Error: {e}")
saved = False
# Raise an error
finally:
engine.dispose()
return saved
    def update_all_tickers(self, DB, progress=False):
alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','0-9']
overwrite = True
# Extract data from TSX
for letter in alphabet:
            df_tickers = self.extract_tickers_for_str(letter,exchange='tsx', progress=progress)
self.save_tickers_in_DB(DB, df_tickers, overwrite=overwrite)
overwrite = False
# Extract data from TSX Venture
for letter in alphabet:
df_tickers = self.extract_tickers_for_str(letter,exchange='tsxv', progress=progess)
self.save_tickers_in_DB(DB, df_tickers, overwrite=overwrite)
def remove_duplicates(self, DB):
os.popen(f"copy {DB} {DB}.BK")
engine1 = sqlalchemy.create_engine(f"sqlite:///{DB}")
engine2 = sqlalchemy.create_engine(f"sqlite:///{DB}-cleaned")
        tables = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table'", engine1)
"""
core.py
Created by <NAME> at 31/08/2020, University of Milano-Bicocca.
(<EMAIL>)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
from collections import namedtuple
import numpy as np
import pandas as pd
import requests
from EcoFin.dataDownload import functions as fc
from EcoFin.dataDownload import shared
try:
from urllib.parse import quote as urlencode
except ImportError:
from urllib import quote as urlencode
class TickerCore():
def __init__(self, ticker):
self.ticker = ticker.upper()
self.history = None
self.baseUrl = shared.baseUrl
self.fundamentals = False
self._info = None
self._sustainability = None
self._recommendations = None
self._majorHolders = None
self._institutionalHolders = None
self._ISIN = None
self._calendar = None
self._expirations = {}
self._earnings = {
"Y": fc.emptyDataSerie(),
"Q": fc.emptyDataSerie()}
self._financials = {
"Y": fc.emptyDataSerie(),
"Q": fc.emptyDataSerie()}
self._balancesheet = {
"Y": fc.emptyDataSerie(),
"Q": fc.emptyDataSerie()}
self._cashflow = {
"Y": fc.emptyDataSerie(),
"Q": fc.emptyDataSerie()}
def getHistory(self, interval="1d",
start=None, end=None, actions=True,
autoAdjust=True, backAdjust=False,
proxy=None, rounding=True, **kwargs):
"""
:Parameters:
period : str
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Either Use period parameter or use start and end
interval : str
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
Intraday data cannot extend last 60 days
start: str
Download start date string (YYYY-MM-DD) or datetime.
Default is 1900-01-01
end: str
Download end date string (YYYY-MM-DD) or datetime.
Default is now
prepost : bool
Include Pre and Post market data in results?
Default is False
autoAdjust: bool
Adjust all OHLC automatically? Default is True
backAdjust: bool
Back-adjusted data to mimic true historical prices
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
Round values to 2 decimal places?
Optional. Default is False = precision suggested by Yahoo!
tz: str
Optional timezone locale for dates.
(default data is returned as non-localized dates)
**kwargs: dict
debug: bool
Optional. If passed as False, will suppress
error message printing to console.
"""
params = {"period1": start, "period2": end}
params["events"] = "div,splits"
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
# Getting data from json
url = "{}/v8/finance/chart/{}".format(self.baseUrl, self.ticker)
key = '{}?{}'.format(url, '&'.join(['{}={}'.format(k, d) for k, d in params.items()]))
if shared.show_url: print('Connection request: {}'.format(key))
if shared.use_cache & shared.session_cache.keyExists(key):
data = shared.session_cache.read(key)
else:
data = requests.get(url=url, params=params, proxies=proxy)
if "Server" in data.text:
raise RuntimeError("Data provider is currently down!")
data = data.json()
shared.session_cache.add(key=key, var=data)
# Clean up errors
debug_mode = True
if "debug" in kwargs and isinstance(kwargs["debug"], bool):
debug_mode = kwargs["debug"]
err_msg = "No data found for this date range"
if "chart" in data and data["chart"]["error"]:
err_msg = data["chart"]["error"]["description"]
shared.DFS[self.ticker] = fc.emptyDataSerie()
shared.ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared.DFS[self.ticker]
elif "chart" not in data or data["chart"]["result"] is None or not data["chart"]["result"]:
shared.DFS[self.ticker] = fc.emptyDataSerie()
shared.ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared.DFS[self.ticker]
# parse quotes
try:
quotes = fc.parseQuotes(data["chart"]["result"][0])
except Exception:
shared.DFS[self.ticker] = fc.emptyDataSerie()
shared.ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared.DFS[self.ticker]
        # 2) fix weird bug with Yahoo! - returning 60m for 30m bars
if interval.lower() == "30m":
quotes2 = quotes.resample('30T')
quotes = pd.DataFrame(index=quotes2.last().index, data={
'Open': quotes2['Open'].first(),
'High': quotes2['High'].max(),
'Low': quotes2['Low'].min(),
'Close': quotes2['Close'].last(),
'Adj Close': quotes2['Adj Close'].last(),
'Volume': quotes2['Volume'].sum()
})
try:
quotes['Dividends'] = quotes2['Dividends'].max()
except Exception:
pass
try:
                quotes['Stock Splits'] = quotes2['Stock Splits'].max()
except Exception:
pass
if autoAdjust:
quotes = fc.autoAdjust(quotes)
elif backAdjust:
quotes = fc.backAdjust(quotes)
if rounding:
quotes = np.round(quotes, data["chart"]["result"][0]["meta"]["priceHint"])
quotes['Volume'] = quotes['Volume'].fillna(0).astype(np.int64)
quotes.dropna(inplace=True)
# actions
dividends, splits = fc.parseEvents(data["chart"]["result"][0])
# combine
df = pd.concat([quotes, dividends, splits], axis=1, sort=True)
df["Dividends"].fillna(0, inplace=True)
df["Stock Splits"].fillna(0, inplace=True)
# index eod/intraday
df.index = df.index.tz_localize("UTC").tz_convert(
data["chart"]["result"][0]["meta"]["exchangeTimezoneName"])
        df.index = pd.to_datetime(df.index.date)
import re
import math
import pandas as pd
import numpy as np
import nltk
import heapq
import pickle
import datetime
from nltk.corpus import stopwords
from operator import itemgetter
# Loading the dictionary
with open('dictionary.pkl', 'rb') as f:
data = pickle.load(f)
# Loading the dictionary with term count
with open('newdictionary.pkl', 'rb') as f:
newdata = pickle.load(f)
# Read the csv file
ff = pd.DataFrame(pd.read_csv('Airbnb_Texas_Rentals.csv'))
ff = ff.fillna('0')
ff = ff.drop(['Unnamed: 0'], axis=1)
# Insert the date_post_1 column based on the date of listing
ff['month']=[(x.split()[0]).lower() for x in ff['date_of_listing']]
ff['month_number']=np.where(ff['month']=='may',"-05-01",
np.where(ff['month']=='june',"-06-01",
np.where(ff['month']=='july',"-07-01",
np.where(ff['month']=="august","-08-01",
np.where(ff['month']=="september","-09-01",
np.where(ff['month']=="october","-10-01",
np.where(ff['month']=="november","-11-01",
np.where(ff['month']=="december","-12-01",
np.where(ff['month']=="january","-01-01",
np.where(ff['month']=="february","-02-01",
np.where(ff['month']=="march","-03-01",
np.where(ff['month']=="april","-04-01","-01-01"))))))))))))
ff['year']=[x.split()[1] for x in ff['date_of_listing']]
ff['date_post']=ff['year']+ff['month_number']
ff['date_post_1']=[pd.to_datetime(x) for x in ff['date_post']]
# calculate the room rate for each listing and merge it to the data frame
ff['rate_num']=[str(d).replace("$","") for d in ff['average_rate_per_night']]
ff=ff.fillna('0')
ff['rate_num_1']=[pd.to_numeric(x) if x!="nan" else 0 for x in ff['rate_num'] ]
ff_means=pd.DataFrame(ff.groupby(['city'])['rate_num_1'].mean())
ff_means.columns=['Average_in_this_city']
ff=ff.merge(ff_means, left_on='city', right_on='city', how='left')
# FUNCTIONS----FUNCTIONS----FUNCTIONS------------------------------
#input = [word1, word2, ...]
#output = {word1: [pos1, pos2], word2: [pos1, pos2], ...}
def index_one_file(termlist):
fileIndex = {}
words = list(set(termlist))
word_list = [x for x in termlist]
for i in range(len(word_list)):
for item in words:
if item == word_list[i]:
fileIndex.setdefault(item, []).append(i)
return fileIndex
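# Example for index_one_file (illustrative input): index_one_file(["to", "be", "or", "not", "to", "be"])
# returns {"to": [0, 4], "be": [1, 5], "or": [2], "not": [3]} (key order may differ).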
#input = {filename: [word1, word2, ...], ...}
#ouput = {filename: {word: [pos1, pos2, ...]}, ...}
def make_indices(dictionary):
total = {}
for filename in dictionary.keys():
new = dictionary[filename]
total[filename] = index_one_file(new)
return total
# Dict reversal
#input = {filename: {word: [pos1, pos2, ...], ... }}
#output = {word: {filename: [pos1, pos2]}, ...}, ...}
def fullIndex(regdex):
total_index = {}
for filename in regdex.keys():
for word in regdex[filename].keys():
if word in total_index.keys():
if filename in total_index[word].keys():
total_index[word][filename].extend(regdex[filename][word][:])
else:
total_index[word][filename] = regdex[filename][word]
else:
total_index[word] = {filename: regdex[filename][word]}
return total_index
# Search Engine
# Preprocess the search
def preprocess(search):
search = search.lower().split()
stop_words = set(stopwords.words('english'))
lemma = nltk.wordnet.WordNetLemmatizer()
search_lst = []
for x in search:
if not x in stop_words:
x = re.sub("[^a-zA-Z]+", "*", x)
if "*" in x:
y = x.split('*')
y[0]=lemma.lemmatize(y[0])
search_lst.append(y[0])
if len(y)>1:
y[1]=lemma.lemmatize(y[1])
search_lst.append(y[1])
else:
x = lemma.lemmatize(x)
search_lst.append(x)
search_lst = (' '.join(search_lst))
return search_lst
#Input for the search
def search_eng_input(phrase):
phrase = phrase.lower().split()
n = len(phrase)
list1, list2, list3 = [], [], []
for x in phrase:
x = preprocess(x)
list1.append(x)
for x in list1:
if x in data.keys():
list2.append(set(data[x].keys()))
    if not list2:
        return list3
    b = list2[0]
for i in range(0,len(list2)):
b = (b & list2[i])
for x in b:
list3.append(int(re.sub("[^0-9]+", "", x))-1)
return list3
# Executing the query and return the result for conjunctive search
def exec_query_s_1(search):
pd.set_option('display.max_colwidth', -1)
l = []
df = pd.DataFrame()
l = (search_eng_input(search))
if len(l)>0:
df = ff[['title','description','city', 'url']].loc[l]
if df.empty == False:
df.set_index('title', inplace=True)
return df
# TF-IDF
def tf(term_count, total_count):
return term_count / total_count
def idf(doc_count, contain_count):
return math.log(doc_count / contain_count)
def tf_idf(term_count, total_count, doc_count, contain_count):
if total_count == 0: total_count = 1
if contain_count == 0: contain_count = 1
return round(tf(term_count, total_count) * idf(doc_count, contain_count),2)
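# Worked example with illustrative counts: tf_idf(3, 100, 18259, 50)
# = (3/100) * ln(18259/50) ≈ 0.03 * 5.90 ≈ 0.18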
# return the number of words in a document, given the document name
def total_count(filename):
total = 0
inverse_data = fullIndex(data) #inverse the data
if filename in inverse_data.keys():
value = inverse_data.get(filename, 0) #get the sub_dict
for k, v in value.items():
total += len(v) # count the number of term in a document
return total
else:
return 0
# return the number of documents that contain a given term
def contain_count(term):
if term in data.keys():
return len(data[term].keys())
else:
return 0
# functions for returning search results ranked by similarity score
#creating doc vectors
def doc_vec(query):
lemma = nltk.wordnet.WordNetLemmatizer()
querylist = query.split()
query = search_eng_input_1(query) # return the list of documents matched first
query = [x+1 for x in query] # +1 for the correct position
doc = {}
docvec = [0] * len(querylist)
for index, word in enumerate(querylist):
word = lemma.lemmatize(word)
word = word.lower()
try:
subvec = []
value = newdata[word]# get {doc1:tf-idf, doc2: tf-idf} of each word in search query
for k, v in value.items():
for i in query: # loop all the documents'ids that search gives
key = ('filtered_doc_%s'%str(i))
if key == k: # if the id is in the dict
subvec.append(v) # append the score to the vector = [[tf-idf1,tf-idf2,..],[tf-idf1,tf-idf2,..],..]
subvec += [0] * (len(query) - len(subvec)) # make the vectors equal in length for not found instances
docvec[index] = subvec
del subvec
except KeyError:
docvec[index] = [0]*len(value.keys()) # if the word not in dict, create a zero vector
# this loop return the dict with format {doc1:vector1,doc2:vector2,...} for the query
for index in range(len(docvec[0])):
sub_vec = [item[index] for item in docvec]
doc.update({query[index]:sub_vec})
return doc
#create query vector
def query_vec(query):
pattern = re.compile('[\W_]+') # for faster search function
query = pattern.sub(' ',query)
querylist = query.split()
b = len(querylist)
c = 18259 #total number of documents
queryvec = [0]*b
for index,word in enumerate(querylist):
a = querylist.count(word)
d = contain_count(word)
wordtfidf = tf_idf(a,b,c,d) # tf-idf score for each word
queryvec[index] = wordtfidf
return queryvec
def dotproduct(vec1, vec2):
if len(vec1) != len(vec2):
return 0
return sum([x*y for x,y in zip(vec1, vec2)])
def magnitude(vec):
return pow(sum(map(lambda x: x**2, vec)),.5)
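# Worked example with illustrative vectors: dotproduct([1, 2], [3, 4]) = 11, magnitude([1, 2]) ≈ 2.236,
# magnitude([3, 4]) = 5, so the cosine score would be round(11 / (2.236 * 5), 2) = 0.98.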
# calculate the score of the results based on query
def generatescore(query):
queryvec = query_vec(query)
doc_vecs_dict = doc_vec(query)
score_dict = {}
for k, v in doc_vecs_dict.items():
score = round(dotproduct(queryvec, v)/(magnitude(queryvec)*magnitude(v)),2)
score_dict.update({k:score})
return score_dict
# heap data structure to keep top k
def heappq(mysearch):
query = search_eng_input(mysearch)
k = 10 # default top k-element
if k >= len(query):
k = len(query)
d = generatescore(mysearch)
k_keys_sorted = heapq.nlargest(k, d.items(), key = itemgetter(1))
key_lst, score_lst = [], []
for i in range(k):
key_lst.append(k_keys_sorted[i][0])
score_lst.append(k_keys_sorted[i][1])
return key_lst, score_lst
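# Example of the top-k selection used in heappq (illustrative scores):
# heapq.nlargest(2, {101: 0.9, 102: 0.3, 103: 0.7}.items(), key=itemgetter(1))
# returns [(101, 0.9), (103, 0.7)].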
# executing tf_idf conjunctive search
def exec_tfidf_search(mysearch):
key_lst, score_lst = heappq(mysearch)
key_lst = [x-1 for x in key_lst] # to get the correct row in df
pd.set_option('display.max_colwidth', -1)
df = pd.DataFrame()
if len(key_lst)>0:
df = ff[['title','description','city', 'url']].loc[key_lst]
df['similarity'] = score_lst
if df.empty == False:
df.set_index('title', inplace=True)
return df
#function for scoring for Step 4 of the task
def search_eng_input_1(search):
search=search.lower().split()
n=len(search)
list1, list2, list3=[],[],[]
for x in search:
x=preprocess(x)
list1.append(x)
for x in list1:
if x in data.keys():
list2.append(set(data[x].keys()))
if len(list2)>0:
b=list2[0]
for i in range(0,len(list2)):
a=b
b=(b & list2[i])
if len(b)==0:
b=a
break
else:
a=b
for x in b:
list3.append(int(re.sub("[^0-9]+", "", x))-1)
if len(list3)==0:
list3=[1,2,3]
return (list3)
# Executing search query
def exec_query_s_2(s):
    pd.set_option('display.max_colwidth', -1)
#!/usr/bin/env python
# coding: utf-8
# # Machine Learning analysis
#
# - This is a Python-based notebook
#
# Kaggle's [Spotify Song Attributes](https://www.kaggle.com/geomack/spotifyclassification/home) dataset contains a number of features of songs from 2017 and a binary variable `target` that represents whether the user liked the song (encoded as 1) or not (encoded as 0). See the documentation of all the features [here](https://developer.spotify.com/documentation/web-api/reference/tracks/get-audio-features/).
# ## Imports
# ### Import libraries
# In[1]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.model_selection import (
RandomizedSearchCV,
cross_validate,
train_test_split,
)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.ensemble import RandomForestClassifier
from lightgbm.sklearn import LGBMClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import (
classification_report,
roc_curve,
RocCurveDisplay,
roc_auc_score
)
import shap
from pylyrics import clean_text as ct
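# A minimal loading/splitting sketch for the dataset described above; the CSV path is an
# assumption (adjust to wherever the Kaggle file is stored), and only the `target` column
# name comes from the dataset description:
#
# spotify_df = pd.read_csv("data/spotify.csv")
# X_spotify = spotify_df.drop(columns=["target"])
# y_spotify = spotify_df["target"]
# X_train, X_test, y_train, y_test = train_test_split(
#     X_spotify, y_spotify, test_size=0.2, random_state=123
# )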
# In[2]:
def mean_std_cross_val_scores(model, X_train, y_train, **kwargs):
"""
Returns mean and std of cross validation
Parameters
----------
model :
scikit-learn model
X_train : numpy array or pandas DataFrame
X in the training data
y_train :
y in the training data
Returns
----------
pandas Series with mean scores from cross_validation
"""
scores = cross_validate(model, X_train, y_train, **kwargs)
mean_scores = pd.DataFrame(scores).mean()
std_scores = pd.DataFrame(scores).std()
out_col = []
for i in range(len(mean_scores)):
out_col.append((f"%0.3f (+/- %0.3f)" % (mean_scores[i], std_scores[i])))
    return pd.Series(data=out_col, index=mean_scores.index)
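# Example usage (hypothetical; assumes X_train and y_train are defined in a later cell):
# mean_std_cross_val_scores(make_pipeline(StandardScaler(), LogisticRegression()),
#                           X_train, y_train, return_train_score=True)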
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
"""
Evaluation
Usage:
pyannote-metrics.py detection [--subset=<subset> --collar=<seconds> --skip-overlap] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py segmentation [--subset=<subset> --tolerance=<seconds>] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py diarization [--subset=<subset> --greedy --collar=<seconds> --skip-overlap] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py identification [--subset=<subset> --collar=<seconds> --skip-overlap] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py spotting [--subset=<subset> --latency=<seconds>... --filter=<expression>...] <database.task.protocol> <hypothesis.json>
pyannote-metrics.py -h | --help
pyannote-metrics.py --version
Options:
<database.task.protocol> Set evaluation protocol (e.g. "Etape.SpeakerDiarization.TV")
--subset=<subset> Evaluated subset (train|developement|test) [default: test]
--collar=<seconds> Collar, in seconds [default: 0.0].
--skip-overlap Do not evaluate overlap regions.
--tolerance=<seconds> Tolerance, in seconds [default: 0.5].
--greedy Use greedy diarization error rate.
--latency=<seconds> Evaluate with fixed latency.
--filter=<expression> Filter out target trials that do not match the
expression; e.g. use --filter="speech>10" to skip
target trials with less than 10s of speech from
the target.
-h --help Show this screen.
--version Show version.
All modes but "spotting" expect hypothesis using the RTTM file format.
RTTM files contain one line per speech turn, using the following convention:
SPEAKER {uri} 1 {start_time} {duration} <NA> <NA> {speaker_id} <NA> <NA>
* uri: file identifier (as given by pyannote.database protocols)
* start_time: speech turn start time in seconds
* duration: speech turn duration in seconds
* speaker_id: speaker identifier
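For example, a single speech turn line with illustrative values would read:
SPEAKER fileA 1 12.34 5.67 <NA> <NA> spk01 <NA> <NA>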
"spotting" mode expects hypothesis using the following JSON file format.
It should contain a list of trial hypothesis, using the same trial order as
pyannote.database speaker spotting protocols (e.g. protocol.test_trial())
[
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
...
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
]
* uri: file identifier (as given by pyannote.database protocols)
* model_id: target identifier (as given by pyannote.database protocols)
* [ti, vi]: [time, value] pair indicating that the system has output the
score vi at time ti (e.g. [10.2, 0.2] means that the system
gave a score of 0.2 at time 10.2s).
Calling "spotting" mode will create a bunch of files.
* <hypothesis.det.txt> contains DET curve using the following raw file format:
<threshold> <fpr> <fnr>
* <hypothesis.lcy.txt> contains latency curves using this format:
<threshold> <fpr> <fnr> <speaker_latency> <absolute_latency>
"""
# command line parsing
from docopt import docopt
import sys
import json
import warnings
import functools
import numpy as np
import pandas as pd
from tabulate import tabulate
# import multiprocessing as mp
from pyannote.core import Annotation
from pyannote.database.util import load_rttm
# evaluation protocols
from pyannote.database import get_protocol
from pyannote.database.util import get_annotated
from pyannote.metrics.detection import DetectionErrorRate
from pyannote.metrics.detection import DetectionAccuracy
from pyannote.metrics.detection import DetectionRecall
from pyannote.metrics.detection import DetectionPrecision
from pyannote.metrics.segmentation import SegmentationPurity
from pyannote.metrics.segmentation import SegmentationCoverage
from pyannote.metrics.segmentation import SegmentationPrecision
from pyannote.metrics.segmentation import SegmentationRecall
from pyannote.metrics.diarization import GreedyDiarizationErrorRate
from pyannote.metrics.diarization import DiarizationErrorRate
from pyannote.metrics.diarization import DiarizationPurity
from pyannote.metrics.diarization import DiarizationCoverage
from pyannote.metrics.identification import IdentificationErrorRate
from pyannote.metrics.identification import IdentificationPrecision
from pyannote.metrics.identification import IdentificationRecall
from pyannote.metrics.spotting import LowLatencySpeakerSpotting
showwarning_orig = warnings.showwarning
def showwarning(message, category, *args, **kwargs):
import sys
print(category.__name__ + ':', str(message))
warnings.showwarning = showwarning
def get_hypothesis(hypotheses, current_file):
"""Get hypothesis for given file
Parameters
----------
hypotheses : `dict`
Speaker diarization hypothesis provided by `load_rttm`.
current_file : `dict`
File description as given by pyannote.database protocols.
Returns
-------
hypothesis : `pyannote.core.Annotation`
Hypothesis corresponding to `current_file`.
"""
uri = current_file['uri']
if uri in hypotheses:
return hypotheses[uri]
# if the exact 'uri' is not available in hypothesis,
# look for matching substring
tmp_uri = [u for u in hypotheses if u in uri]
# no matching speech turns. return empty annotation
if len(tmp_uri) == 0:
msg = f'Could not find hypothesis for file "{uri}"; assuming empty file.'
warnings.warn(msg)
return Annotation(uri=uri, modality='speaker')
# exactly one matching file. return it
if len(tmp_uri) == 1:
hypothesis = hypotheses[tmp_uri[0]]
hypothesis.uri = uri
return hypothesis
# more that one matching file. error.
    msg = f'Found too many hypotheses matching file "{uri}" ({tmp_uri}).'
    raise ValueError(msg)
def process_one(item, hypotheses=None, metrics=None):
reference = item['annotation']
hypothesis = get_hypothesis(hypotheses, item)
uem = get_annotated(item)
return {key: metric(reference, hypothesis, uem=uem)
for key, metric in metrics.items()}
def get_reports(protocol, subset, hypotheses, metrics):
process = functools.partial(process_one,
hypotheses=hypotheses,
metrics=metrics)
# get items and their number
progress = protocol.progress
protocol.progress = False
items = list(getattr(protocol, subset)())
protocol.progress = progress
n_items = len(items)
for item in items:
process(item)
# HB. 2018-02-05: parallel processing was removed because it is not clear
# how to handle the case where the same 'uri' is processed several times
# in a possibly different order for each sub-metric...
# # heuristic to estimate the optimal number of processes
# chunksize = 20
# processes = max(1, min(mp.cpu_count(), n_items // chunksize))
# pool = mp.Pool(processes)
# _ = pool.map(process, items, chunksize=chunksize)
return {key: metric.report(display=False)
for key, metric in metrics.items()}
def reindex(report):
"""Reindex report so that 'TOTAL' is the last row"""
index = list(report.index)
i = index.index('TOTAL')
return report.reindex(index[:i] + index[i+1:] + ['TOTAL'])
def detection(protocol, subset, hypotheses, collar=0.0, skip_overlap=False):
options = {'collar': collar,
'skip_overlap': skip_overlap,
'parallel': True}
metrics = {
'error': DetectionErrorRate(**options),
'accuracy': DetectionAccuracy(**options),
'precision': DetectionPrecision(**options),
'recall': DetectionRecall(**options)}
reports = get_reports(protocol, subset, hypotheses, metrics)
report = metrics['error'].report(display=False)
accuracy = metrics['accuracy'].report(display=False)
precision = metrics['precision'].report(display=False)
recall = metrics['recall'].report(display=False)
report['accuracy', '%'] = accuracy[metrics['accuracy'].name, '%']
report['precision', '%'] = precision[metrics['precision'].name, '%']
report['recall', '%'] = recall[metrics['recall'].name, '%']
report = reindex(report)
columns = list(report.columns)
report = report[[columns[0]] + columns[-3:] + columns[1:-3]]
summary = 'Detection (collar = {0:g} ms{1})'.format(
1000*collar, ', no overlap' if skip_overlap else '')
headers = [summary] + \
[report.columns[i][0] for i in range(4)] + \
['%' if c[1] == '%' else c[0] for c in report.columns[4:]]
print(tabulate(report, headers=headers, tablefmt="simple",
floatfmt=".2f", numalign="decimal", stralign="left",
missingval="", showindex="default", disable_numparse=False))
def segmentation(protocol, subset, hypotheses, tolerance=0.5):
options = {'tolerance': tolerance, 'parallel': True}
metrics = {'coverage': SegmentationCoverage(**options),
'purity': SegmentationPurity(**options),
'precision': SegmentationPrecision(**options),
'recall': SegmentationRecall(**options)}
reports = get_reports(protocol, subset, hypotheses, metrics)
coverage = metrics['coverage'].report(display=False)
purity = metrics['purity'].report(display=False)
precision = metrics['precision'].report(display=False)
recall = metrics['recall'].report(display=False)
coverage = coverage[metrics['coverage'].name]
purity = purity[metrics['purity'].name]
precision = precision[metrics['precision'].name]
recall = recall[metrics['recall'].name]
    report = pd.concat([coverage, purity, precision, recall], axis=1)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Make dataset for the End-to-End model (CSJ corpus).
Note that feature extraction depends on transcripts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile
import sys
import argparse
from tqdm import tqdm
import numpy as np
import pandas as pd
import pickle
sys.path.append('../')
from csj.path import Path
from csj.input_data import read_audio
from csj.labels.transcript import read_sdb
from utils.util import mkdir_join
from utils.inputs.wav_split import split_wav
from utils.dataset import add_element
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, help='path to CSJ dataset')
parser.add_argument('--dataset_save_path', type=str,
help='path to save dataset')
parser.add_argument('--feature_save_path', type=str,
help='path to save input features')
parser.add_argument('--wav_save_path', type=str,
help='path to save wav files (per utterance)')
parser.add_argument('--tool', type=str,
choices=['htk', 'python_speech_features', 'librosa'])
parser.add_argument('--htk_save_path', type=str, help='path to save features')
parser.add_argument('--normalize', type=str,
choices=['global', 'speaker', 'utterance', 'no'])
parser.add_argument('--save_format', type=str, choices=['numpy', 'htk', 'wav'])
parser.add_argument('--feature_type', type=str, choices=['fbank', 'mfcc'])
parser.add_argument('--channels', type=int,
help='the number of frequency channels')
parser.add_argument('--window', type=float,
help='window width to extract features')
parser.add_argument('--slide', type=float, help='extract features per slide')
parser.add_argument('--energy', type=int, help='if 1, add the energy feature')
parser.add_argument('--delta', type=int, help='if 1, add the energy feature')
parser.add_argument('--deltadelta', type=int,
help='if 1, double delta features are also extracted')
parser.add_argument('--subset', type=int,
                    help='if 1, create a small dataset.')
parser.add_argument('--fullset', type=int,
                    help='if 1, create a full-size dataset.')
args = parser.parse_args()
path = Path(data_path=args.data_path,
config_path='./config',
htk_save_path=args.htk_save_path)
CONFIG = {
'feature_type': args.feature_type,
'channels': args.channels,
'sampling_rate': 16000,
'window': args.window,
'slide': args.slide,
'energy': bool(args.energy),
'delta': bool(args.delta),
'deltadelta': bool(args.deltadelta)
}
if args.save_format == 'htk':
assert args.tool == 'htk'
def main(data_size):
speaker_dict_dict = {} # dict of speaker_dict
for data_type in ['train', 'eval1', 'eval2', 'eval3']:
print('=' * 50)
print(' ' * 20 + data_type + ' (' + data_size + ')' + ' ' * 20)
print('=' * 50)
########################################
# labels
########################################
if data_type == 'train':
label_paths = path.trans(data_type='train_' + data_size)
else:
label_paths = path.trans(data_type=data_type)
save_vocab_file = True if data_type == 'train' else False
is_test = True if 'eval' in data_type else False
print('=> Processing transcripts...')
speaker_dict_dict[data_type] = read_sdb(
label_paths=label_paths,
data_size=data_size,
vocab_file_save_path=mkdir_join('./config', 'vocab_files'),
save_vocab_file=save_vocab_file,
is_test=is_test,
data_type=data_type)
########################################
# inputs
########################################
print('\n=> Processing input data...')
input_save_path = mkdir_join(
args.feature_save_path, args.save_format, data_size)
if isfile(join(input_save_path, data_type, 'complete.txt')):
print('Already exists.')
else:
if args.save_format == 'wav':
########################################
# Split WAV files per utterance
########################################
if data_type == 'train':
                    wav_paths = path.wav(corpus='train_' + data_size)
else:
wav_paths = path.wav(corpus=data_type)
split_wav(wav_paths=wav_paths,
speaker_dict=speaker_dict_dict[data_type],
save_path=mkdir_join(input_save_path, data_type))
# NOTE: ex.) save_path:
# csj/feature/save_format/data_size/data_type/speaker/utt_name.npy
elif args.save_format in ['numpy', 'htk']:
if data_type == 'train':
if args.tool == 'htk':
audio_paths = path.htk(data_type='train_' + data_size)
else:
audio_paths = path.wav(data_type='train_' + data_size)
is_training = True
global_mean_male, global_std_male, global_mean_female, global_std_female = None, None, None, None
else:
if args.tool == 'htk':
audio_paths = path.htk(data_type=data_type)
else:
audio_paths = path.wav(data_type=data_type)
is_training = False
# Load statistics over train dataset
global_mean_male = np.load(
join(input_save_path, 'train/global_mean_male.npy'))
global_std_male = np.load(
join(input_save_path, 'train/global_std_male.npy'))
global_mean_female = np.load(
join(input_save_path, 'train/global_mean_female.npy'))
global_std_female = np.load(
join(input_save_path, 'train/global_std_female.npy'))
read_audio(audio_paths=audio_paths,
speaker_dict=speaker_dict_dict[data_type],
tool=args.tool,
config=CONFIG,
normalize=args.normalize,
is_training=is_training,
save_path=mkdir_join(input_save_path, data_type),
save_format=args.save_format,
global_mean_male=global_mean_male,
global_std_male=global_std_male,
global_mean_female=global_mean_female,
global_std_female=global_std_female)
# NOTE: ex.) save_path:
# csj/feature/save_format/data_size/data_type/speaker/*.npy
# Make a confirmation file to prove that dataset was saved
# correctly
with open(join(input_save_path, data_type, 'complete.txt'), 'w') as f:
f.write('')
########################################
# dataset (csv)
########################################
print('\n=> Saving dataset files...')
dataset_save_path = mkdir_join(
args.dataset_save_path, args.save_format, data_size, data_type)
df_columns = ['frame_num', 'input_path', 'transcript']
df_kanji = pd.DataFrame([], columns=df_columns)
df_kanji_divide = pd.DataFrame([], columns=df_columns)
df_kana = pd.DataFrame([], columns=df_columns)
df_kana_divide = pd.DataFrame([], columns=df_columns)
df_phone = pd.DataFrame([], columns=df_columns)
df_phone_divide = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
with open(join(input_save_path, data_type, 'frame_num.pickle'), 'rb') as f:
frame_num_dict = pickle.load(f)
utt_count = 0
df_kanji_list, df_kanji_divide_list = [], []
df_kana_list, df_kana_divide_list = [], []
df_phone_list, df_phone_divide_list = [], []
df_word_freq1_list, df_word_freq5_list = [], []
df_word_freq10_list, df_word_freq15_list = [], []
speaker_dict = speaker_dict_dict[data_type]
for speaker, utt_dict in tqdm(speaker_dict.items()):
for utt_index, utt_info in utt_dict.items():
kanji_indices, kanji_divide_indices = utt_info[2:4]
kana_indices, kana_divide_indices = utt_info[4:6]
phone_indices, phone_divide_indices = utt_info[6:8]
word_freq1_indices, word_freq5_indices = utt_info[8:10]
word_freq10_indices, word_freq15_indices = utt_info[10:12]
if args.save_format == 'numpy':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.npy')
elif args.save_format == 'htk':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.htk')
elif args.save_format == 'wav':
input_utt_save_path = path.utt2wav(utt_index)
else:
                    raise ValueError("save_format must be 'numpy', 'htk', or 'wav'.")
assert isfile(input_utt_save_path)
frame_num = frame_num_dict[speaker + '_' + utt_index]
df_kanji = add_element(
df_kanji, [frame_num, input_utt_save_path, kanji_indices])
df_kanji_divide = add_element(
df_kanji_divide, [frame_num, input_utt_save_path, kanji_divide_indices])
df_kana = add_element(
df_kana, [frame_num, input_utt_save_path, kana_indices])
df_kana_divide = add_element(
df_kana_divide, [frame_num, input_utt_save_path, kana_divide_indices])
df_phone = add_element(
df_phone, [frame_num, input_utt_save_path, phone_indices])
df_phone_divide = add_element(
df_phone_divide, [frame_num, input_utt_save_path, phone_divide_indices])
df_word_freq1 = add_element(
df_word_freq1, [frame_num, input_utt_save_path, word_freq1_indices])
df_word_freq5 = add_element(
df_word_freq5, [frame_num, input_utt_save_path, word_freq5_indices])
df_word_freq10 = add_element(
df_word_freq10, [frame_num, input_utt_save_path, word_freq10_indices])
df_word_freq15 = add_element(
df_word_freq15, [frame_num, input_utt_save_path, word_freq15_indices])
utt_count += 1
# Reset
if utt_count == 10000:
df_kanji_list.append(df_kanji)
df_kanji_divide_list.append(df_kanji_divide)
df_kana_list.append(df_kana)
df_kana_divide_list.append(df_kana_divide)
df_phone_list.append(df_phone)
df_phone_divide_list.append(df_phone_divide)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
df_kanji = pd.DataFrame([], columns=df_columns)
df_kanji_divide = pd.DataFrame([], columns=df_columns)
df_kana = pd.DataFrame([], columns=df_columns)
df_kana_divide = pd.DataFrame([], columns=df_columns)
df_phone = pd.DataFrame([], columns=df_columns)
df_phone_divide = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
utt_count = 0
# Last dataframe
df_kanji_list.append(df_kanji)
df_kanji_divide_list.append(df_kanji_divide)
df_kana_list.append(df_kana)
df_kana_divide_list.append(df_kana_divide)
df_phone_list.append(df_phone)
df_phone_divide_list.append(df_phone_divide)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
# Concatenate all dataframes
df_kanji = df_kanji_list[0]
df_kanji_divide = df_kanji_divide_list[0]
df_kana = df_kana_list[0]
df_kana_divide = df_kana_divide_list[0]
df_phone = df_phone_list[0]
df_phone_divide = df_phone_divide_list[0]
df_word_freq1 = df_word_freq1_list[0]
df_word_freq5 = df_word_freq5_list[0]
df_word_freq10 = df_word_freq10_list[0]
df_word_freq15 = df_word_freq15_list[0]
for df_i in df_kanji_list[1:]:
            df_kanji = pd.concat([df_kanji, df_i], axis=0)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.exceptions import ConvergenceWarning
from glob import glob
from multiprocessing import Pool
import sys, getopt, warnings, traceback
import pandas as pd
import numpy as np
import re
warnings.filterwarnings('error')
def classify(X, ys, label, outpath):
y = ys[label].dropna().astype(bool)
common_subjs = X.index.intersection(y.index)
X_label = X.loc[common_subjs]
y = y.loc[common_subjs]
X_monkey = X_label.sample(frac=1)
X_monkey.index = X_label.index
X_monkey[['age', 'gender']] = X_label[['age', 'gender']]
features = X_label.columns.drop(['age', 'gender'])
all_preds = pd.DataFrame()
weights_mine = []
weights_monkey = []
for fold in range(20):
try:
X_train, X_test, y_train, y_test = train_test_split(X_label, y)
preds = pd.DataFrame()
preds['truth'] = y_test
preds['gender'] = X_test['gender']
preds['age'] = X_test['age']
preds['monkey'] = 'NA'
preds['my_preds'] = 'NA'
preds['fold'] = fold
preds['warning'] = None
preds['my_preds_prob'] = None
preds['monkey_prob'] = None
clf_mine = LogisticRegression(solver='lbfgs', max_iter=5000)
clf_monkey = LogisticRegression(solver='lbfgs', max_iter=5000)
clf_mine.fit(X_train, y_train)
clf_monkey.fit(X_monkey.loc[X_train.index], y_train)
preds['my_preds'] = clf_mine.predict(X_test)
preds['monkey'] = clf_monkey.predict(X_test)
preds['my_preds_prob'] = clf_mine.predict_proba(X_test)[:,0]
preds['monkey_prob'] = clf_monkey.predict_proba(X_test)[:,0]
all_preds = pd.concat([all_preds, preds])
weights_monkey.append(clf_monkey.coef_[0])
weights_mine.append(clf_mine.coef_[0])
except ConvergenceWarning:
preds['warning'] = 'ConvergenceWarning'
all_preds = pd.concat([all_preds, preds])
label = label.replace('/', '_')
all_preds.to_csv('%s/%s.tsv' % (outpath, label), sep='\t')
pd.DataFrame(weights_mine).to_csv('%s/%s_weights_mine.csv' % (outpath, label))
pd.DataFrame(weights_monkey).to_csv('%s/%s_weights_monkey.csv' % (outpath, label))
def process(X, ys, label, outpath):
try:
n_preds = classify(X, ys, label, outpath)
except Exception as e:
label = label.replace('/', '')
f = open('%s/exceptions_%s.txt' % (outpath, label), 'w')
print(str(e)+'\n')
f.write(str(label)+'\n')
f.write(str(e)+'\n')
f.write(traceback.format_exc())
f.close()
def main(argv):
features, labels, outpath = '', '', ''
try:
opts, args = getopt.getopt(argv,"h:x:y:o:",\
["features=","labels=","outpath="])
except getopt.GetoptError:
print('classify.py -x <featuresfile> -y <labelsfile> -o <outputspath>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('classify.py -x <featuresfile> -y <labelsfile> -o <outputspath>')
sys.exit()
elif opt in ("-x", "--features"):
features = arg
elif opt in ("-y", "--labels"):
labels = arg
elif opt in ("-o", "--outpath"):
outpath = arg
print('Features file is ', features)
print('Labels file is ', labels)
print('Output file is ', outpath)
X = pd.read_csv(features, index_col=0)
    ys = pd.read_csv(labels, index_col=0)
from ontobio.io import assocparser, gpadparser
from ontobio import ecomap
import click
import pandas as pd
import datetime
from ontobio.io import qc
from ontobio.io.assocparser import Report
from ontobio.model.association import GoAssociation
from ontobio.model import collections
from typing import List
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
@click.command()
@click.option("--file1",
"-file1",
type=click.Path(),
required=True,
help='file1 is the source file.')
@click.option("--file2",
"-file2",
type=click.Path(),
required=True,
help='file2 is the file that is the result of a transformation, or the target file.')
@click.option("--output",
"-o",
type=click.STRING,
required=True,
help='the name of the prefix for all files generated by this tool.')
@click.option("--group_by_column",
"-gb",
type=click.Choice(['evidence_code', 'subject', 'object']),
multiple=True,
required=False,
help='Options to group by include: subject, object, and/or evidence_code.')
@click.option("--restrict_to_decreases",
"-rtd",
type=click.BOOL,
required=False,
help='Only report group by results when the second file shows a decrease in number by grouping column')
def compare_files(file1, file2, output, group_by_column, restrict_to_decreases):
"""
Method to compare two GPAD or GAF files and report differences on a file level and via converting
file-based rows to GoAssociation objects.
:param file1: Name of the source file to compare
:type file1: str
:param file2: Name of the target/second file to compare
:type file2: str
:param output: The prefix that will be appended to all the output files/reports created by this script.
:type output: str
    :param group_by_column: Column(s) to group the comparison counts by; choices are subject, object, and/or evidence_code
    :type group_by_column: List
    :param restrict_to_decreases: An optional boolean flag that allows the grouping column counts to be returned only
        if they show a decrease in number between file1 and file2
    :type restrict_to_decreases: bool
"""
pd.set_option('display.max_rows', 35000)
df_file1, df_file2, assocs1, assocs2 = get_parser(file1, file2)
generate_count_report(df_file1, df_file2, file1, file2, output)
compare_associations(assocs1, assocs2, output, file1, file2)
generate_group_report(df_file1, df_file2, group_by_column, file1, file2, restrict_to_decreases, output)
def generate_count_report(df_file1, df_file2, file1, file2, output):
"""
Method to generate a report of the number of distinct values of each of the columns
in a GAF or GPAD file. Currently restricted to the following columns: subject, qualifiers, object, evidence_code
and reference.
Uses pandas internal functions like merge and nunique to count and display metrics.
    :param df_file1: data frame representing a normalized columnar representation of file1
:type df_file1: pd
    :param df_file2: data frame representing a normalized columnar representation of file2
:type df_file2: pd
:param file1: The file name of the file provided in the click for reporting purposes.
:type file1: str
:param file2: The file name of the file provided in the click for reporting purposes.
:type file2: str
:param output: Prefix of the reported files for reporting purposes.
:type output: str
"""
file1_groups, counts_frame1 = get_column_count(df_file1, file1)
file2_groups, counts_frame2 = get_column_count(df_file2, file2)
    merged_frame = pd.concat([counts_frame1, counts_frame2], axis=1)
# encoding: utf-8
# copyright: GeoDS Lab, University of Wisconsin-Madison
# authors: <NAME>, <NAME>, <NAME>
import requests
import os
import pandas as pd
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Start month, start day, and output_folder are necessary')
parser.add_argument('--start_year', type=str, required=True, help='Start year')
parser.add_argument('--start_month', type=str, required=True, help='Start month')
parser.add_argument('--start_day', type=str, required=True, help='Start day')
parser.add_argument('--end_year', type=str, help='End year')
parser.add_argument('--end_month', type=str, help='End month')
parser.add_argument('--end_day', type=str, help='End day')
parser.add_argument('--output_folder', type=str, required=True, help='Output folder: ./')
parser.add_argument('--ct', action = 'store_true', help='ct2ct')
parser.add_argument('--county', action = 'store_true', help='county2county')
parser.add_argument('--state', action = 'store_true', help='state2state')
args = parser.parse_args()
start_year = str(args.start_year).zfill(4)
start_month = str(args.start_month).zfill(2)
start_day = str(args.start_day).zfill(2)
output_folder = args.output_folder
if args.end_year is None:
end_year = str(args.start_year).zfill(4)
else:
end_year = str(args.end_year).zfill(4)
if args.end_month is None:
end_month = str(args.start_month).zfill(2)
else:
end_month = str(args.end_month).zfill(2)
if args.end_day is None:
end_day = str(args.start_day).zfill(2)
else:
end_day = str(args.end_day).zfill(2)
# Download files of one day
def download_file(scale, year, month, day, output_folder):
try:
if os.path.exists(f"{output_folder}/") == False:
os.mkdir(f"{output_folder}/")
if os.path.exists(f"{output_folder}/{scale}/") == False:
os.mkdir(f"{output_folder}/{scale}/")
if scale == "ct2ct":
if os.path.exists(f"{output_folder}/{scale}/{year}_{month}_{day}/") == False:
os.mkdir(f"{output_folder}/{scale}/{year}_{month}_{day}/")
except Exception as e:
print(e)
print("There is no output folder. Please create the output folder first!")
try:
if scale == "ct2ct":
for i in range(20):
if year == "2019":
if (month == "01") or (month == "02") or (month == "03") or (month == "04"):
repo = "DailyFlows-Ct2019-1"
elif (month == "05") or (month == "06") or (month == "07") or (month == "08"):
repo = "DailyFlows-Ct2019-2"
elif (month == "09") or (month == "10") or (month == "11") or (month == "12"):
repo = "DailyFlows-Ct2019-3"
elif year == "2020":
if (month == "01") or (month == "02") or (month == "03") or (month == "04"):
repo = "DailyFlows-Ct2020-1"
elif (month == "05") or (month == "06") or (month == "07") or (month == "08"):
repo = "DailyFlows-Ct2020-2"
elif (month == "09") or (month == "10") or (month == "11") or (month == "12"):
repo = "DailyFlows-Ct2020-3"
elif year == "2021":
repo = "DailyFlows-Ct2021"
r = requests.get(url=f"https://raw.githubusercontent.com/GeoDS/COVID19USFlows-{repo}/master/daily_flows/{scale}/{year}_{month}_{day}/daily_{scale}_{year}_{month}_{day}_{i}.csv")
with open(f"{output_folder}/{scale}/{year}_{month}_{day}/daily_{scale}_{year}_{month}_{day}_{i}.csv", 'wb') as file:
file.write(r.content)
else:
r = requests.get(url=f"https://raw.githubusercontent.com/GeoDS/COVID19USFlows-DailyFlows/master/daily_flows/{scale}/daily_{scale}_{year}_{month}_{day}.csv")
with open(f"{output_folder}/{scale}/daily_{scale}_{year}_{month}_{day}.csv", 'wb') as file:
file.write(r.content)
return True
except Exception as e:
print(e)
return False
# Create time series dataframe
time_df = pd.date_range(start=f'{start_year}-{start_month}-{start_day}', end=f'{end_year}-{end_month}-{end_day}')
time_df = pd.DataFrame(time_df, columns=["date"])
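# A minimal sketch of driving download_file() from the date range built above.
# Assumption: the original script iterates the dates roughly like this; the
# actual loop is not shown in this excerpt.
for _date in time_df["date"]:
    _y, _m, _d = _date.strftime("%Y"), _date.strftime("%m"), _date.strftime("%d")
    if args.ct:
        download_file("ct2ct", _y, _m, _d, output_folder)
    if args.county:
        download_file("county2county", _y, _m, _d, output_folder)
    if args.state:
        download_file("state2state", _y, _m, _d, output_folder)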
from abc import ABC, abstractproperty
from collections import namedtuple
import numpy as np
import pandas as pd
from loguru import logger
#from helpers import persist_model
@logger.catch
def persist_model(name,clf=None, method='load'):
'Pass in the file name, object to be saved or loaded'
import dill
if method == 'load':
with open(name,'rb') as f:
return dill.load(f)
elif method == 'save':
logger.info(f'[+] Persisting {name} ...')
if clf is None:
raise ValueError('Pass Model/Pipeline/Transformation')
with open(name,'wb') as f:
dill.dump(clf,f)
logger.info(f'[+] Persistence Complete. Model {name} is saved')
else:
        raise ValueError('Wrong arguments')
MODEL_PATH='hisia/models/base_model.pkl'
pre_load_model = persist_model(MODEL_PATH, method='load')
Sentiment = namedtuple('Sentiment', ['sentiment','positive_probability', 'negative_probability'])
class HisiaLoad(ABC):
def __init__(self, model_path=None):
if model_path is None:
self.model = pre_load_model
else:
self.model = persist_model(model_path, method='load')
def __repr__(self):
return f'{self.__class__.__name__}(Model=Logistic Regression)'
@abstractproperty
def sentiment(self):
pass
class Hisia(HisiaLoad):
def __init__(self, text, model_path=None):
super().__init__(model_path)
self.text = text
self.sentiment
def __repr__(self):
return (f'Sentiment(sentiment={self.sentiment.sentiment}, '
f'positive_probability={self.sentiment.positive_probability}, '
f'negative_probability={self.sentiment.negative_probability})')
@property
def sentiment(self):
if isinstance(self.text, str):
self.X = [self.text]
else:
self.X = self.text
response = self.model.predict_proba(self.X)
response = pd.DataFrame(response)
response.columns = ['negative_probability','positive_probability']
response['sentiment'] = np.where(response['negative_probability'] > .5, 'negative', 'positive')
self.results = Sentiment(**response.round(3).to_dict(orient='index')[0])
return self.results
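    # Illustrative usage (assumes the bundled Danish base model at MODEL_PATH is
    # available; the probabilities shown are placeholders, not real output):
    #   >>> Hisia('det er rigtig godt').sentiment
    #   Sentiment(sentiment='positive', positive_probability=..., negative_probability=...)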
@property
def explain(self):
feature_names = self.model.named_steps['count_verctorizer'].get_feature_names()
best_features = [feature_names[i] for i in \
self.model.named_steps['feature_selector'].get_support(indices=True)]
coefficients = self.model.named_steps['logistic_regression'].coef_[0]
index_range = range(len(best_features))
look_table = {index:(token,coef) for index, coef, token in zip(index_range, coefficients, best_features)}
v = self.model.named_steps['count_verctorizer'].transform(self.X)
v = self.model.named_steps['feature_selector'].transform(v)
        v = pd.DataFrame.sparse.from_spmatrix(v)
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=pd.np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to integer number of contracts so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non existent contract holdings result in fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
TS('2015-03-20')],
index=['CLF15', 'CLG15', 'CLH15'])
wexp = util.weighted_expiration(weights, contract_dates)
exp_wexp = pd.DataFrame([[17.0, 49.0], [32.0, 61.5], [47.0, 74.0]],
index=[TS('2015-01-03'),
TS('2015-01-04'),
TS('2015-01-05')],
columns=["CL1", "CL2"])
assert_frame_equal(wexp, exp_wexp)
def test_flatten():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_dict():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')])
weights2 = pd.DataFrame(1, index=widx, columns=["CO1"])
weights = {"CL": weights1, "CO": weights2}
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_bad_input():
dummy = 0
with pytest.raises(ValueError):
util.flatten(dummy)
def test_unflatten():
flat_wts = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
wts_exp = pd.DataFrame(vals, index=widx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_unflatten_dict():
flat_wts = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
weights1 = pd.DataFrame(vals, index=widx, columns=cols)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')],
names=("date", "contract"))
cols = pd.Index(["CO1"], name="generic")
weights2 = pd.DataFrame(1, index=widx, columns=cols)
wts_exp = {"CL": weights1, "CO": weights2}
assert_dict_of_frames(wts, wts_exp)
def test_reindex():
# related to https://github.com/matthewgilbert/mapping/issues/11
# no op
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
prices = pd.Series([103, 101, 102, 100], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLH5')])
new_prices = util.reindex(prices, widx, limit=0)
exp_prices = prices
assert_series_equal(exp_prices, new_prices)
# missing front prices error
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5')])
prices = pd.Series([100], index=idx)
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5')])
with pytest.raises(ValueError):
util.reindex(prices, widx, 0)
# NaN returns introduced and filled
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
                                     (TS('2015-01-04')
import networkx as nx
import pandas as pd
def apply(df, prev, curr, prev_type, curr_type):
prev_nodes = set(df.dropna(subset=[prev], how="any")[prev].unique())
succ_nodes = set(df.dropna(subset=[curr], how="any")[curr].unique())
all_nodes = prev_nodes.union(succ_nodes)
edges = set()
df = df.dropna(subset=[prev, curr], how="any")
stream = df[[prev, curr, prev_type, curr_type]].to_dict('records')
types = {}
for el in stream:
source = el[prev]
target = el[curr]
type_source = el[prev_type]
type_target = el[curr_type]
types[source] = type_source
types[target] = type_target
if source is not None and target is not None:
            if str(source).lower() != "nan" and str(target).lower() != "nan":
edges.add((source, target))
G = nx.DiGraph()
for node in all_nodes:
G.add_node(node)
for edge in edges:
G.add_edge(edge[0], edge[1])
return G, types
def get_conn_comp(df, prev, curr, prev_type, curr_type, ref_type=""):
G, types = apply(df, prev, curr, prev_type, curr_type)
conn_comp = nx.connected_components(nx.Graph(G))
list_corresp = []
for index, cc in enumerate(conn_comp):
for n2 in cc:
this_type = types[n2] if n2 in types else ""
list_corresp.append({"node": n2, "type": this_type, "case:concept:name": str(index)})
dataframe = pd.DataFrame(list_corresp).sort_values("node")
return dataframe
def get_ancestors_successors(df, prev, curr, prev_type, curr_type, ref_type="", all_docs=None):
G, types = apply(df, prev, curr, prev_type, curr_type)
return get_ancestors_successors_from_graph(G, types, ref_type=ref_type, all_docs=all_docs)
def get_ancestors_successors_from_graph(G, types, ref_type="", all_docs=None):
list_corresp = []
nodes_ref_type = {x for x,y in types.items() if y == ref_type}
index = 0
for node in nodes_ref_type:
all_ancestors = set(nx.ancestors(G, node))
all_descendants = set(nx.descendants(G, node))
all_nodes = all_ancestors.union(all_descendants).union({node})
for n2 in all_nodes:
new_el = {"node": n2, "type": types[n2], "case:concept:name": str(index)}
list_corresp.append(new_el)
index = index + 1
if all_docs is not None:
all_docs = all_docs.difference(nodes_ref_type)
for node in all_docs:
new_el = {"node": node, "type": "", "case:concept:name": str(index)}
list_corresp.append(new_el)
index = index + 1
if list_corresp:
dataframe = pd.DataFrame(list_corresp).sort_values("node")
else:
        dataframe = pd.DataFrame({"node": []})
import pymongo
from pymongo import MongoClient
from tkinter import *
import time;
import datetime
import random
from tkinter import messagebox
import numpy as np
import pandas as pd
from tkinter import simpledialog
#GLOBAL VALUES
d_c = []
x = pd.DataFrame()
y = pd.DataFrame()
X_train = pd.DataFrame()
X_test = pd.DataFrame()
y_train = pd.DataFrame()
y_test = pd.DataFrame()
X_poly = pd.DataFrame()
y_pred = pd.DataFrame()
alldata = pd.DataFrame()
radio = []
radio1 = []
Values2 = []
Values1 = []
Values = []
ScaleV = 0
SplitV = 0
size = 0
algs = str(0)
answer = str(0)
def fourth():
root4 = Tk()
root4.overrideredirect(True)
root4.geometry("{0}x{1}+0+0".format(root4.winfo_screenwidth(), root4.winfo_screenheight()))
root4.title("Store Name")
#-------------------------------------------------------------------------------------------------------------------------------------------
global y_pred,x,y,X_train, X_test, y_train, y_test,X_poly,ScaleV,SplitV,Yscale,algs,answer,size
predictor = StringVar()
predicted = StringVar()
k = []
tp = []
try:
col = list(y.columns)
col1 = list(y)
for i in range(0,10):
for j in col1:
k.append(y[j][i])
t = y_pred[i][0]
tp.append(round(t,2))
except:
print("went wrong")
pass
#-------------------------------------------------------------------------------------------------------------------------------------------
Titlecard = Frame(root4, width = 1280, height = 100, bd = 7, bg = 'dodgerblue', relief = GROOVE)
Titlecard.pack(side = 'top', anchor = CENTER, fill = X)
rt = time.strftime("%d/%m/%y")
body = Frame(root4, width = 1280, height = 600, bd = 9, bg = 'dodgerblue3', relief = FLAT)
body.pack(side = 'top',expand = 1 ,fill = BOTH)
login = Frame(body, width = 600, height = 400, bd = 7, bg = 'Steelblue2', relief = RAISED)
login.pack(side = TOP, anchor = CENTER ,expand = 1, fill = BOTH, ipady = 20,ipadx = 10)
loginbtns = Frame(body, width = 700, height = 30, bd = 7, bg = 'Steelblue2', relief = RAISED)
loginbtns.pack(side = BOTTOM,anchor = CENTER, fill = X)
#-------------------------------------------------------------------------------------------------------------------------------------------
def predictor1():
global y_pred,x,y,X_train, X_test, y_train, y_test,X_poly,ScaleV,SplitV,Yscale,algs,answer,size
pro = round(float(predictor.get()),2)
pru = str(str(pro) + ',')
lsp = pru.split(',')
prel = lsp[:-1]
pre = pd.DataFrame(prel)
if len(x) != 0 and len(y) != 0:
if SplitV == 1 and ScaleV == 1 :
size1 = size
yscale = Yscale
                from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = size1, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
if yscale > 0:
y_train = sc_X.fit_transform(y_train)
y_test = sc_X.transform(y_test)
if str(algs) == "Simple Linear Regression":
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,np.ravel(y_train))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Multiple Linear Regression":
pass
elif str(algs) == "Polynomial Regression":
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train,y_train)
from sklearn.preprocessing import PolynomialFeatures
## answer = simpledialog.askstring("GUI", ["Degree:"])
poly_reg = PolynomialFeatures(degree = int(answer))
X_poly = poly_reg.fit_transform(X_train)
reg2 = LinearRegression()
reg2.fit(X_poly, np.ravel(y_train))
                    y_pred1 = reg2.predict(poly_reg.transform(pre))
predicted1 = str(y_pred1)
elif str(algs) == "Support Vector Regression":
## answer = simpledialog.askstring("GUI", ["Kernel:"])
from sklearn.svm import SVR
regressor = SVR(kernel = answer)
regressor.fit(X_train,np.ravel(y_train))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Decision Tree Regression":
## answer = simpledialog.askstring("GUI", ["Random state:"])
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = int(answer))
regressor.fit(X_train,np.ravel(y_train))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Random Forest Regression":
## answer = simpledialog.askstring("GUI", ["n_estimators:"])
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = int(answer), random_state = 0)
regressor.fit(X_train,np.ravel( y_train))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
predicted.set(predicted1)
elif SplitV == 1 and ScaleV == 0:
size1 = size
                from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = size1, random_state = 0)
if str(algs) == "Simple Linear Regression":
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,np.ravel(y_train))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Multiple Linear Regression":
pass
elif str(algs) == "Polynomial Regression":
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train,np.ravel(y_train))
from sklearn.preprocessing import PolynomialFeatures
## answer = simpledialog.askstring("GUI", ["Degree:"])
poly_reg = PolynomialFeatures(degree = int(answer))
X_poly = poly_reg.fit_transform(X_train)
reg2 = LinearRegression()
reg2.fit(X_poly,np.ravel( y_train))
                    y_pred1 = reg2.predict(poly_reg.transform(pre))
predicted1 = str(y_pred1)
elif str(algs) == "Support Vector Regression":
## answer = simpledialog.askstring("GUI", ["Kernel:"])
from sklearn.svm import SVR
regressor = SVR(kernel = answer)
regressor.fit(X_train,np.ravel(y_train))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Decision Tree Regression":
## answer = simpledialog.askstring("GUI", ["Random state:"])
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = int(answer))
regressor.fit(X_train,np.ravel(y_train))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Random Forest Regression":
## answer = simpledialog.askstring("GUI", ["n_estimators:"])
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = int(answer), random_state = 0)
regressor.fit(X_train,np.ravel( y_train))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
predicted.set(predicted1)
elif SplitV == 0 and ScaleV == 1:
yscale1 = Yscale
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
x = sc_X.fit_transform(x)
if yscale1 > 0:
y = sc_X.fit_transform(y)
if str(algs) == "Simple Linear Regression":
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x,np.ravel(y))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Multiple Linear Regression":
pass
elif str(algs) == "Polynomial Regression":
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(x,np.ravel(y))
from sklearn.preprocessing import PolynomialFeatures
## answer = simpledialog.askstring("GUI", ["Degree:"])
poly_reg = PolynomialFeatures(degree = int(answer))
X_poly = poly_reg.fit_transform(x)
reg2 = LinearRegression()
reg2.fit(X_poly, np.ravel(y))
                    y_pred1 = reg2.predict(poly_reg.transform(pre))
predicted1 = str(y_pred1)
elif str(algs) == "Support Vector Regression":
## answer = simpledialog.askstring("GUI", ["Kernel:"])
from sklearn.svm import SVR
regressor = SVR(kernel = answer)
regressor.fit(x,np.ravel(y))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Decision Tree Regression":
## answer = simpledialog.askstring("GUI", ["Random state:"])
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = int(answer))
regressor.fit(x,np.ravel(y))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Random Forest Regression":
## answer = simpledialog.askstring("GUI", ["n_estimators:"])
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = int(answer), random_state = 0)
regressor.fit(x, np.ravel(y))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
predicted.set(predicted1)
else:
if str(algs) == "Simple Linear Regression":
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x,np.ravel(y))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Multiple Linear Regression":
pass
elif str(algs) == "Polynomial Regression":
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(x,np.ravel(y))
from sklearn.preprocessing import PolynomialFeatures
## answer = simpledialog.askstring("GUI", ["Degree:"])
poly_reg = PolynomialFeatures(degree = int(answer))
X_poly = poly_reg.fit_transform(x)
reg2 = LinearRegression()
reg2.fit(X_poly, np.ravel(y))
                    y_pred1 = reg2.predict(poly_reg.transform(pre))
predicted1 = str(y_pred1)
elif str(algs) == "Support Vector Regression":
## answer = simpledialog.askstring("GUI", ["Kernel:"])
from sklearn.svm import SVR
regressor = SVR(kernel = answer)
regressor.fit(x,np.ravel(y))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Decision Tree Regression":
## answer = simpledialog.askstring("GUI", ["Random state:"])
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = int(answer))
regressor.fit(x,np.ravel(y))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
elif str(algs) == "Random Forest Regression":
## answer = simpledialog.askstring("GUI", ["n_estimators:"])
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = int(answer), random_state = 0)
regressor.fit(x, np.ravel(y))
y_pred1 = regressor.predict(pre)
predicted1 = str(y_pred1)
predicted.set(predicted1)
def backk():
global y_pred,x,y,X_train, X_test, y_train, y_test,X_poly,ScaleV,SplitV,Yscale,algs,answer,size,tp,k
y_pred = pd.DataFrame()
X_train = pd.DataFrame()
X_test = pd.DataFrame()
y_train = pd.DataFrame()
y_test = pd.DataFrame()
ScaleV = 0
SplitV = 0
size = 0
algs = str(0)
answer = str(0)
tp = []
k = []
root4.destroy()
third()
def exiit():
qexit = messagebox.askyesno("GUI","DO YOU WISH TO EXIT")
if qexit > 0:
root4.destroy()
#-------------------------------------------------------------------------------------------------------------------------------------------
date1 = Label(Titlecard, text = "DATE:" + rt,relief = GROOVE, width = 17, bd = 7,bg = 'white', fg = 'black',font = ('arial', 15, 'italic'))
date1.pack(side = RIGHT, anchor = NW, pady = 15)
Title = Label(Titlecard, text = "SHOP NAME", relief = GROOVE, width = 15 , bd = 7, bg = 'dodgerblue4',
fg = 'lightSkyblue2', font = ('arial', 20, 'italic'))
Title.pack(side = LEFT,pady = 15, ipadx = 35, padx =45)
logintitle = Label(login, text = "Predicted values :", relief = FLAT, width = 10 , bd = 6, bg = 'black',
fg = 'Steelblue', font = ('arial', 20, 'italic'))
logintitle.grid(row = 0, column = 0, columnspan = 3)
#-------------------------------------------------------------------------------------------------------------------------------------------
Label(login, text = "Predicted values :", relief = FLAT, width = 15 , bd = 6, bg = 'Steelblue2',
fg = 'black', font = ('arial', 20, 'italic')).grid(row = 0, column = 1)
Label(login, text = "Dependent values :", relief = FLAT, width = 15 , bd = 6, bg = 'Steelblue2',
fg = 'black', font = ('arial', 20, 'italic')).grid(row = 0, column = 2)
Label(login, text = "Enter the value \nto predict :", relief = FLAT, width = 15 , bd = 6, bg = 'Steelblue2',
fg = 'Steelblue2', font = ('arial', 20, 'italic')).grid(row = 0, column = 3)
Label(login, text = "Enter the value \nto predict :", relief = FLAT, width = 15 , bd = 6, bg = 'Steelblue2',
fg = 'black', font = ('arial', 20, 'italic')).grid(row = 0, column = 4)
Entry(login, relief=SUNKEN,font = ('arial', 15, 'italic'), textvariable = predictor,
bd = 9, insertwidth = 3).grid(row=1,column=4,pady = 20)
Label(login, text = "Predicted value :", relief = FLAT, width = 15 , bd = 6, bg = 'Steelblue2',
fg = 'black', font = ('arial', 20, 'italic')).grid(row = 2, column = 4)
Label(login, textvariable = predicted, relief=FLAT,font = ('arial', 15, 'italic'),width = 15 , bd = 6, bg = 'white',
fg = 'black').grid(row=3,column=4,pady = 20)
btn1 = Button(login, text = "PREDICT",command=predictor1, relief = GROOVE, width = 10 , bd = 5, bg = 'Steelblue2',
fg = 'blue2', font = ('arial', 20, 'italic')).grid(row = 1, column = 5)
btn1 = Button(loginbtns, text = "BACK" ,command = backk, relief = RAISED, width = 10 , bd = 6, bg = 'Steelblue2',
fg = 'blue2', font = ('arial', 20, 'italic')).pack(side =LEFT, anchor = CENTER,expand = 2, fill = X,ipady = 6)
btn2 = Button(loginbtns, text = "EXIT",command = exiit, relief = RAISED, width = 10 , bd = 6, bg = 'Steelblue2',
fg = 'blue2', font = ('arial', 20, 'italic')).pack(side =LEFT, anchor = CENTER,expand = 2, fill = X,ipady = 6)
try:
r = 1
for i in range(6):
Label(login, text = str(tp[i]), relief = GROOVE, width = 15 , bd = 4, bg = 'Steelblue2',
fg = 'black', font = ('arial', 20, 'italic')).grid(row = r, column = 1)
r = r + 1
r = 1
for i in range(6):
Label(login, text = str(round(k[i],2)), relief = GROOVE, width = 15 , bd = 4, bg = 'Steelblue2',
fg = 'black', font = ('arial', 20, 'italic')).grid(row = r, column = 2)
r = r + 1
except:
print("something here went wrong")
Label(login, text = "Couldn't\n import \ndata", relief = GROOVE, width = 15 , bd = 4, bg = 'Steelblue2',
fg = 'black', font = ('arial', 20, 'italic')).grid(row = 1, column = 1)
Label(login, text = "Couldn't\n import \ndata", relief = GROOVE, width = 15 , bd = 4, bg = 'Steelblue2',
fg = 'black', font = ('arial', 20, 'italic')).grid(row = 1, column = 2)
pass
root4.mainloop()
#-------------------------------------------------------------------------------------------------------------------------------------------
def third():
root2 = Tk()
root2.overrideredirect(True)
root2.geometry("{0}x{1}+0+0".format(root2.winfo_screenwidth(), root2.winfo_screenheight()))
root2.title("GUI for ML algorithims")
#-------------------------------------------------------------------------------------------------------------------------------------------
Titlecard = Frame(root2, width = 1280, height = 100, bd = 7, bg = 'blue', relief = GROOVE)
Titlecard.pack(side = 'top', anchor = CENTER, fill = X)
rt = time.strftime("%d/%m/%y")
body = Frame(root2, width = 1280, height = 600, bd = 9, bg = 'dodgerblue3', relief = FLAT)
body.pack(side = 'top',expand=1,fill = BOTH)
login = Frame(body, width = 1000, height = 600, bd = 7, bg = 'dodgerblue3', relief = RAISED)
login.pack(side = TOP,expand=1, anchor = CENTER, fill = BOTH, ipady = 40,ipadx = 10)
loginbtns = Frame(body, width = 700, height = 50, bd = 7, bg = 'Steelblue2', relief = RAISED)
loginbtns.pack(side = BOTTOM,anchor = CENTER, fill = X)
#-------------------------------------------------------------------------------------------------------------------------------------------
Scale = IntVar()
Split = IntVar()
Spsize = StringVar()
tkvar = StringVar()
#-------------------------------------------------------------------------------------------------------------------------------------------
def back():
global d_c,alldata,x,y,radio,radio1,Values,Values1,Values2,X_train, X_test, y_train, y_test,y_pred
radio = []
radio1 = []
Values2 = []
Values1 = []
Values = []
X_train = pd.DataFrame()
X_test = pd.DataFrame()
y_train = pd.DataFrame()
y_test = pd.DataFrame()
y_pred = pd.DataFrame()
root2.destroy()
second()
def okay():
global x,y,X_train, X_test, y_train, y_test,X_poly,ScaleV,SplitV,Yscale,algs,answer,size,y_pred
if len(x) != 0 and len(y) != 0:
ScaleV = Scale.get()
SplitV = Split.get()
algs = str(tkvar.get())
if Split.get() == 1 and Scale.get() == 1 :
size = float(Spsize.get())
yscale = messagebox.askyesno("GUI","Do you want to scale dependent variable?")
Yscale = yscale
                from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = size, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
if yscale > 0:
y_train = sc_X.fit_transform(y_train)
y_test = sc_X.transform(y_test)
                algs = str(tkvar.get())
if str(tkvar.get()) == "Simple Linear Regression":
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,np.ravel(y_train))
y_pred = regressor.predict(X_test)
elif str(tkvar.get()) == "Multiple Linear Regression":
pass
elif str(tkvar.get()) == "Polynomial Regression":
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train,np.ravel(y_train))
from sklearn.preprocessing import PolynomialFeatures
answer = simpledialog.askstring("GUI", ["Degree:"])
poly_reg = PolynomialFeatures(degree = int(answer))
X_poly = poly_reg.fit_transform(X_train)
reg2 = LinearRegression()
reg2.fit(X_poly, np.ravel(y_train))
                    y_pred = reg2.predict(poly_reg.transform(X_test))
elif str(tkvar.get()) == "Support Vector Regression":
answer = simpledialog.askstring("GUI", ["Kernel:"])
from sklearn.svm import SVR
regressor = SVR(kernel = answer)
regressor.fit(X_train,np.ravel(y_train))
y_pred = regressor.predict(X_test)
elif str(tkvar.get()) == "Decision Tree Regression":
answer = simpledialog.askstring("GUI", ["Random state:"])
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = int(answer))
regressor.fit(X_train,np.ravel(y_train))
y_pred = regressor.predict(X_test)
elif str(tkvar.get()) == "Random Forest Regression":
answer = simpledialog.askstring("GUI", ["n_estimators:"])
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = int(answer), random_state = 0)
regressor.fit(X_train, np.ravel(y_train))
y_pred = regressor.predict(X_test)
root2.destroy()
fourth()
elif Split.get() == 1 and Scale.get() == 0:
size = float(Spsize.get())
                from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = size, random_state = 0)
if str(tkvar.get()) == "Simple Linear Regression":
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,np.ravel(y_train))
y_pred = regressor.predict(X_test)
elif str(tkvar.get()) == "Multiple Linear Regression":
pass
elif str(tkvar.get()) == "Polynomial Regression":
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train,y_train)
from sklearn.preprocessing import PolynomialFeatures
answer = simpledialog.askstring("GUI", ["Degree:"])
poly_reg = PolynomialFeatures(degree = int(answer))
X_poly = poly_reg.fit_transform(X_train)
reg2 = LinearRegression()
reg2.fit(X_poly, np.ravel(y_train))
                    y_pred = reg2.predict(poly_reg.transform(X_test))
elif str(tkvar.get()) == "Support Vector Regression":
answer = simpledialog.askstring("GUI", ["Kernel:"])
from sklearn.svm import SVR
regressor = SVR(kernel = answer)
regressor.fit(X_train,np.ravel(y_train))
y_pred = regressor.predict(X_test)
elif str(tkvar.get()) == "Decision Tree Regression":
answer = simpledialog.askstring("GUI", ["Random state:"])
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = int(answer))
regressor.fit(X_train,np.ravel(y_train))
y_pred = regressor.predict(X_test)
elif str(tkvar.get()) == "Random Forest Regression":
answer = simpledialog.askstring("GUI", ["n_estimators:"])
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = int(answer), random_state = 0)
regressor.fit(X_train, np.ravel(y_train))
y_pred = regressor.predict(X_test)
root2.destroy()
fourth()
elif Split.get() == 0 and Scale.get() == 1:
yscale1 = messagebox.askyesno("GUI","Do you want to scale dependent variable?")
Yscale = yscale1
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
x = sc_X.fit_transform(x)
if yscale1 > 0:
y = sc_X.fit_transform(y)
if str(tkvar.get()) == "Simple Linear Regression":
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x,y)
y_pred = regressor.predict(x)
elif str(tkvar.get()) == "Multiple Linear Regression":
pass
elif str(tkvar.get()) == "Polynomial Regression":
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(x,y)
from sklearn.preprocessing import PolynomialFeatures
answer = simpledialog.askstring("GUI", ["Degree:"])
poly_reg = PolynomialFeatures(degree = int(answer))
X_poly = poly_reg.fit_transform(x)
reg2 = LinearRegression()
reg2.fit(X_poly, y)
                    y_pred = reg2.predict(X_poly)
elif str(tkvar.get()) == "Support Vector Regression":
answer = simpledialog.askstring("GUI", ["Kernel:"])
from sklearn.svm import SVR
regressor = SVR(kernel = answer)
regressor.fit(x,y)
y_pred = regressor.predict(x)
elif str(tkvar.get()) == "Decision Tree Regression":
answer = simpledialog.askstring("GUI", ["Random state:"])
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = int(answer))
regressor.fit(x,y)
y_pred = regressor.predict(x)
elif str(tkvar.get()) == "Random Forest Regression":
answer = simpledialog.askstring("GUI", ["n_estimators:"])
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = int(answer), random_state = 0)
regressor.fit(x, y)
y_pred = regressor.predict(x)
root2.destroy()
fourth()
else:
if str(tkvar.get()) == "Simple Linear Regression":
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x,y)
y_pred = regressor.predict(x)
elif str(tkvar.get()) == "Multiple Linear Regression":
pass
elif str(tkvar.get()) == "Polynomial Regression":
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(x,y)
from sklearn.preprocessing import PolynomialFeatures
answer = simpledialog.askstring("GUI", ["Degree:"])
poly_reg = PolynomialFeatures(degree = int(answer))
X_poly = poly_reg.fit_transform(x)
reg2 = LinearRegression()
reg2.fit(X_poly, y)
                    y_pred = reg2.predict(X_poly)
elif str(tkvar.get()) == "Support Vector Regression":
answer = simpledialog.askstring("GUI", ["Kernel:"])
from sklearn.svm import SVR
regressor = SVR(kernel = answer)
regressor.fit(x,y)
y_pred = regressor.predict(x)
elif str(tkvar.get()) == "Decision Tree Regression":
answer = simpledialog.askstring("GUI", ["Random state:"])
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = int(answer))
regressor.fit(x,y)
y_pred = regressor.predict(x)
elif str(tkvar.get()) == "Random Forest Regression":
answer = simpledialog.askstring("GUI", ["n_estimators:"])
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = int(answer), random_state = 0)
regressor.fit(x, y)
y_pred = regressor.predict(x)
root2.destroy()
fourth()
#-------------------------------------------------------------------------------------------------------------------------------------------
date1 = Label(Titlecard, text = "DATE:" + rt,relief = GROOVE, width = 17, bd = 7,bg = 'white', fg = 'black',font = ('arial', 15, 'italic'))
date1.pack(side = RIGHT, anchor = NW, pady = 15)
Title = Label(Titlecard, text = "GUI for ML algorithims", relief = GROOVE, width = 15 , bd = 7, bg = 'dodgerblue4',
fg = 'lightSkyblue2', font = ('arial', 20, 'italic'))
Title.pack(side = LEFT,pady = 15, ipadx = 35, padx =45)
#dummy
Label(login, text="Do you wish to scale the datas? ", relief=FLAT,width=20, bd = 4, fg = 'dodgerblue3',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=0,column=0,padx = 25, pady = 15,ipady = 2)
#dummy
Label(login, text="Do you wish to scale the datas? ", relief=FLAT,width=20, bd = 4, fg = 'dodgerblue3',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=1,column=0,padx = 25, pady = 15,ipady = 2)
#heading
Label(login, text="Do you wish to scale the datas? ", relief=FLAT,width=25, bd = 4, fg = 'black',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=1,column=0,padx = 25, pady = 15,ipady = 2)
#dummy
Label(login, text="Do you wish to scale the datas? ", relief=FLAT,width=20, bd = 4, fg = 'dodgerblue3',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=1,column=2,padx = 25, pady = 15,ipady = 2)
#dummy
Label(login, text="Do you wish to scale the datas? ", relief=FLAT,width=20, bd = 4, fg = 'dodgerblue3',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=3,column=2,padx = 25, pady = 15,ipady = 2)
Radiobutton(login, text = "YES",value = 1,variable=Scale,indicatoron=0
,bg = 'steelblue',font = ('arial', 15, 'bold')).grid(row = 2,column = 0,padx =5, ipadx =15)
Radiobutton(login, text = "NO",value = 2,variable=Scale,indicatoron=0
,bg = 'steelblue',font = ('arial', 15, 'bold')).grid(row = 4,column = 0,padx =5, ipadx =15)
#heading
Label(login, text="Do you wish to split the data? ", relief=FLAT,width=25, bd = 4, fg = 'black',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=1,column=2,padx = 25, pady = 15,ipady = 2)
Radiobutton(login, text = "YES",value = 1,variable=Split,indicatoron=0
,bg = 'steelblue',font = ('arial', 15, 'bold')).grid(row = 2,column = 2,padx =5, ipadx =15)
Radiobutton(login, text = "NO",value = 2,variable=Split,indicatoron=0
,bg = 'steelblue',font = ('arial', 15, 'bold')).grid(row = 4,column = 2,padx =5, ipadx =15)
#heading
Label(login, text="Enter split size : ", relief=FLAT,width=25, bd = 4, fg = 'black',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=1,column=4,padx = 25, pady = 15,ipady = 2)
Entry(login,relief=SUNKEN,font = ('arial', 15, 'italic'), textvariable = Spsize,
bd = 9, insertwidth = 3).grid(row=2,column=4)
#dummy
Label(login, text="Do you wish to scale the datas? ", relief=FLAT,width=20, bd = 4, fg = 'dodgerblue3',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=5,column=0,padx = 25, pady = 7,ipady = 2)
#dummy
Label(login, text="Do you wish to scale the datas? ", relief=FLAT,width=20, bd = 4, fg = 'dodgerblue3',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=7,column=0,padx = 25, pady = 7,ipady = 2)
#heading
Label(login, text="Select your algorithim : ", relief=FLAT,width=30, bd = 4, fg = 'black',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=6,column=0,padx = 25, pady = 7,ipady = 2)
#heading_under construction
Label(login, text="Select your error correction : ", relief=FLAT,width=30, bd = 4, fg = 'dodgerblue3',bg = 'dodgerblue3',
font = ('arial', 15, 'bold')).grid(row=8,column=0,padx = 25, pady = 7,ipady = 2)
choices = { 'Simple Linear Regression','Multiple Linear Regression','Polynomial Regression',
'Support Vector Regression','Decision Tree Regression','Random Forest Regression'}
tkvar.set('Simple Linear Regression') # set the default option
popupMenu = OptionMenu(login, tkvar, *choices)
popupMenu.config(fg = 'black',bg = 'dodgerblue3', relief=GROOVE, bd = 7)
popupMenu["menu"].config(fg = 'black',bg = 'dodgerblue3', relief=FLAT ,bd = 10)
popupMenu.grid(row=6,column=3,columnspan=4,padx = 30, pady = 7,ipadx = 25)
btn1 = Button(loginbtns, text = "OKAY",command=okay, relief = RAISED, width = 10 , bd = 6, bg = 'Steelblue2',
fg = 'blue2', font = ('arial', 20, 'italic')).pack(side =LEFT, anchor = CENTER,expand = 2, fill = X)
btn3 = Button(loginbtns, text = "BACK",command=back, relief = RAISED, width = 10 , bd = 6, bg = 'Steelblue2',
fg = 'blue2', font = ('arial', 20, 'italic')).pack(side =LEFT, anchor = CENTER,expand = 2, fill = X)
root2.mainloop()
def second():
root1 = Tk()
root1.overrideredirect(True)
root1.geometry("{0}x{1}+0+0".format(root1.winfo_screenwidth(), root1.winfo_screenheight()))
root1.title("GUI for ML algorithims")
#-------------------------------------------------------------------------------------------------------------------------------------------
Titlecard = Frame(root1, width = 1280, height = 100, bd = 7, bg = 'blue', relief = GROOVE)
Titlecard.pack(side = 'top', anchor = CENTER, fill = X)
rt = time.strftime("%d/%m/%y")
body = Frame(root1, width = 1280, height = 600, bd = 9, bg = 'dodgerblue3', relief = FLAT)
body.pack(side = 'top',expand = 1,fill = BOTH)
login = Frame(body, width = 1000, height = 600, bd = 7, bg = 'dodgerblue3', relief = RAISED)
login.pack(side = TOP, anchor = CENTER,expand=1, fill = X, ipady = 40,ipadx = 10)
#-------------------------------------------------------------------------------------------------------------------------------------------
var = IntVar()
var1 = IntVar()
global d_c,radio,radio1,Values,Values1,Values2
for i in range(len(d_c)):
text = str(d_c[i])
Values.append(text)
length = len(alldata[text])
Values1.append(length)
if length != len(alldata):
g = len(alldata) - length
Values2.append(g)
else:
Values2.append('NULL')
text1 = str(str(text) + "1")
text1 = IntVar()
radio.append(text1)
text2 = str(str(text) + "2")
text2 = IntVar()
radio1.append(text2)
rn = len(d_c)
#-------------------------------------------------------------------------------------------------------------------------------------------
def back():
global d_c,alldata,x,y,radio,radio1,Values,Values1,Values2
root1.destroy()
d_c = []
x = pd.DataFrame()
y = pd.DataFrame()
        alldata = pd.DataFrame()
import os
from collections import Counter
from os import listdir
from os.path import isfile, join
from sys import argv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import Orange
from matplotlib import cm, ticker, style
from matplotlib import colors as mcolors
from matplotlib.collections import PolyCollection
from matplotlib.pyplot import figure
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D, axes3d
style.use('ggplot')
from classifiers import classifiers_list
from datasetsDelaunay import dataset_list_bi, dataset_list_mult
from folders import output_dir, dir_pca_biclasse, metricas_biclasse, dir_pca_multiclasse, metricas_multiclasse
from parameters import order, alphas
order_dict = {'area': 1,
'volume': 2,
'area_volume_ratio': 3,
'edge_ratio': 4,
'radius_ratio': 5,
'aspect_ratio': 6,
'max_solid_angle': 7,
'min_solid_angle': 8,
'solid_angle': 9}
class Statistics:
def __init__(self):
pass
def compute_CD_customizado(self, avranks, n, alpha="0.05", test="nemenyi"):
"""
Returns critical difference for Nemenyi or Bonferroni-Dunn test
according to given alpha (either alpha="0.05" or alpha="0.1") for average
        ranks and number of tested datasets N. Test can be either "nemenyi" for
        the Nemenyi two-tailed test or "bonferroni-dunn" for the Bonferroni-Dunn test.
"""
k = len(avranks)
d = {("nemenyi", "0.05"): [1.960, 2.344, 2.569, 2.728, 2.850, 2.948, 3.031, 3.102, 3.164, 3.219, 3.268, 3.313,
3.354, 3.391, 3.426,
3.458, 3.489, 3.517, 3.544, 3.569, 3.593, 3.616, 3.637, 3.658, 3.678, 3.696, 3.714,
3.732, 3.749, 3.765,
3.780, 3.795, 3.810, 3.824, 3.837, 3.850, 3.863, 3.876, 3.888, 3.899, 3.911, 3.922,
3.933, 3.943, 3.954,
3.964, 3.973, 3.983, 3.992],
("nemenyi", "0.1"): [0, 0, 1.644854, 2.052293, 2.291341, 2.459516,
2.588521, 2.692732, 2.779884, 2.854606, 2.919889,
2.977768, 3.029694, 3.076733, 3.119693, 3.159199,
3.195743, 3.229723, 3.261461, 3.291224, 3.319233],
("bonferroni-dunn", "0.05"): [0, 0, 1.960, 2.241, 2.394, 2.498, 2.576,
2.638, 2.690, 2.724, 2.773],
("bonferroni-dunn", "0.1"): [0, 0, 1.645, 1.960, 2.128, 2.241, 2.326,
2.394, 2.450, 2.498, 2.539]}
q = d[(test, alpha)]
cd = q[k] * (k * (k + 1) / (6.0 * n)) ** 0.5
return cd
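    # Usage sketch (illustrative values only, not taken from the original script):
    #   st = Statistics()
    #   cd = st.compute_CD_customizado([3.2, 2.8, 4.1, 3.9, 4.5, 2.5, 6.0],
    #                                  n=len(dataset_list_bi))
    # i.e. the average ranks of the seven resampling methods over the biclass
    # datasets give the critical difference drawn on the CD diagrams below.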
def calcula_media_folds_biclasse(self, df):
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(
columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER', 'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO',
'IBA', 'AUC'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
mode = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'PRE'] = group['PRE'].mean()
dfr.at[i, 'REC'] = group['REC'].mean()
dfr.at[i, 'SPE'] = group['SPE'].mean()
dfr.at[i, 'F1'] = group['F1'].mean()
dfr.at[i, 'GEO'] = group['GEO'].mean()
dfr.at[i, 'IBA'] = group['IBA'].mean()
dfr.at[i, 'AUC'] = group['AUC'].mean()
i = i + 1
print(i)
dfr.to_csv(output_dir + 'resultado_media_biclasse_' + mode + '.csv', index=False)
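    # Usage sketch (the input CSV name is an assumption, not taken from the original code):
    #   st = Statistics()
    #   st.calcula_media_folds_biclasse(pd.read_csv(output_dir + 'resultado_biclasse.csv'))
    # The input holds the fold-level results; the method averages the 5 folds per
    # (MODE, DATASET, PREPROC, ALGORITHM) group and writes the summary CSV.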
def calcula_media_folds_multiclass(self, df):
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(
columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER', 'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO',
'IBA', 'AUC'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
mode = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'PRE'] = group['PRE'].mean()
dfr.at[i, 'REC'] = group['REC'].mean()
dfr.at[i, 'SPE'] = group['SPE'].mean()
dfr.at[i, 'F1'] = group['F1'].mean()
dfr.at[i, 'GEO'] = group['GEO'].mean()
dfr.at[i, 'IBA'] = group['IBA'].mean()
dfr.at[i, 'AUC'] = group['AUC'].mean()
i = i + 1
print(i)
dfr.to_csv(output_dir + 'resultado_media_multiclass_' + mode + '.csv', index=False)
def separa_delaunay_biclass(self, filename):
df = pd.read_csv(filename)
list_base = []
for p in np.arange(0, len(preproc_type)):
list_base.append(df[(df['PREPROC'] == preproc_type[p])])
df_base = list_base.pop(0)
for i in np.arange(0, len(list_base)):
df_base = pd.concat([df_base, list_base[i]], ignore_index=True)
for o in order:
for a in alphas:
dfr = df[(df['ORDER'] == o)]
dfr1 = dfr[(dfr['ALPHA'] == str(a))]
df_file = pd.concat([df_base, dfr1], ignore_index=True)
df_file.to_csv('./../output_dir/result_biclass' + '_' + o + '_' + str(a) + '.csv', index=False)
def read_dir_files(self, dir_name):
f = [f for f in listdir(dir_name) if isfile(join(dir_name, f))]
return f
def find_best_rank(self, results_dir, tipo):
results = self.read_dir_files(results_dir)
        df = pd.DataFrame(columns=['ARQUIVO', 'WINER'])
i = 0
for f in results:
df_temp = pd.read_csv(results_dir + f)
df.at[i, 'ARQUIVO'] = f
df.at[i, 'WINER'] = df_temp.iloc[0, 0]
i += 1
df.to_csv(output_dir + tipo)
def find_best_delaunay(self, results_dir, tipo):
df = pd.read_csv(results_dir + tipo)
i = 0
j = 0
df_best = pd.DataFrame(columns=['ARQUIVO', 'WINER'])
win = list(df['WINER'])
for w in win:
if w == 'DELAUNAY':
df_best.at[i, 'ARQUIVO'] = df.iloc[j, 1]
df_best.at[i, 'WINER'] = df.iloc[j, 2]
i += 1
j += 1
df_best.to_csv(output_dir + 'only_best_delaunay_pca_biclass_media_rank.csv')
def rank_by_algorithm(self, df, tipo, wd, reducao, order, alpha):
        '''
        Computes, per classifier and dataset, the rank of each resampling method
        (original data, SMOTE variants, Geometric SMOTE and Delaunay) for every
        metric, then writes the total/mean rank CSVs and the CD diagrams.
        :param df: DataFrame with the averaged fold results
        :param tipo: experiment label used in the output file names
        :param wd: output working directory
        :param reducao: dimensionality-reduction label (e.g. 'pca')
        :param order: Delaunay geometry criterion
        :param alpha: Delaunay alpha value
        :return: None (results are saved to disk)
        '''
df_tabela = pd.DataFrame(
columns=['DATASET', 'ALGORITHM', 'ORIGINAL', 'RANK_ORIGINAL', 'SMOTE', 'RANK_SMOTE', 'SMOTE_SVM',
'RANK_SMOTE_SVM', 'BORDERLINE1', 'RANK_BORDERLINE1', 'BORDERLINE2', 'RANK_BORDERLINE2',
'GEOMETRIC_SMOTE', 'RANK_GEOMETRIC_SMOTE',
'DELAUNAY', 'RANK_DELAUNAY', 'DELAUNAY_TYPE', 'ALPHA', 'unit'])
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df.to_csv(dir_pca_biclasse + reducao + '_' + tipo + '_' + order + '_' + str(alpha) + '.csv')
j = 0
for d in dataset_list_bi:
for m in metricas_biclasse:
aux = group[group['DATASET'] == d]
aux = aux.reset_index()
df_tabela.at[j, 'DATASET'] = d
df_tabela.at[j, 'ALGORITHM'] = name
indice = aux.PREPROC[aux.PREPROC == '_train'].index.tolist()[0]
df_tabela.at[j, 'ORIGINAL'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_SMOTE'].index.tolist()[0]
df_tabela.at[j, 'SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_smoteSVM'].index.tolist()[0]
df_tabela.at[j, 'SMOTE_SVM'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline1'].index.tolist()[0]
df_tabela.at[j, 'BORDERLINE1'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline2'].index.tolist()[0]
df_tabela.at[j, 'BORDERLINE2'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Geometric_SMOTE'].index.tolist()[0]
df_tabela.at[j, 'GEOMETRIC_SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.ORDER == order].index.tolist()[0]
df_tabela.at[j, 'DELAUNAY'] = aux.at[indice, m]
df_tabela.at[j, 'DELAUNAY_TYPE'] = order
df_tabela.at[j, 'ALPHA'] = alpha
df_tabela.at[j, 'unit'] = m
j += 1
df_pre = df_tabela[df_tabela['unit'] == 'PRE']
df_rec = df_tabela[df_tabela['unit'] == 'REC']
df_spe = df_tabela[df_tabela['unit'] == 'SPE']
df_f1 = df_tabela[df_tabela['unit'] == 'F1']
df_geo = df_tabela[df_tabela['unit'] == 'GEO']
df_iba = df_tabela[df_tabela['unit'] == 'IBA']
df_auc = df_tabela[df_tabela['unit'] == 'AUC']
pre = df_pre[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
rec = df_rec[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
spe = df_spe[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
f1 = df_f1[['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
geo = df_geo[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
iba = df_iba[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
auc = df_auc[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
pre = pre.reset_index()
pre.drop('index', axis=1, inplace=True)
rec = rec.reset_index()
rec.drop('index', axis=1, inplace=True)
spe = spe.reset_index()
spe.drop('index', axis=1, inplace=True)
f1 = f1.reset_index()
f1.drop('index', axis=1, inplace=True)
geo = geo.reset_index()
geo.drop('index', axis=1, inplace=True)
iba = iba.reset_index()
iba.drop('index', axis=1, inplace=True)
auc = auc.reset_index()
auc.drop('index', axis=1, inplace=True)
            # compute the rank row by row (one row per dataset/metric)
pre_rank = pre.rank(axis=1, ascending=False)
rec_rank = rec.rank(axis=1, ascending=False)
spe_rank = spe.rank(axis=1, ascending=False)
f1_rank = f1.rank(axis=1, ascending=False)
geo_rank = geo.rank(axis=1, ascending=False)
iba_rank = iba.rank(axis=1, ascending=False)
auc_rank = auc.rank(axis=1, ascending=False)
df_pre = df_pre.reset_index()
df_pre.drop('index', axis=1, inplace=True)
df_pre['RANK_ORIGINAL'] = pre_rank['ORIGINAL']
df_pre['RANK_SMOTE'] = pre_rank['SMOTE']
df_pre['RANK_SMOTE_SVM'] = pre_rank['SMOTE_SVM']
df_pre['RANK_BORDERLINE1'] = pre_rank['BORDERLINE1']
df_pre['RANK_BORDERLINE2'] = pre_rank['BORDERLINE2']
df_pre['RANK_GEOMETRIC_SMOTE'] = pre_rank['GEOMETRIC_SMOTE']
df_pre['RANK_DELAUNAY'] = pre_rank['DELAUNAY']
df_rec = df_rec.reset_index()
df_rec.drop('index', axis=1, inplace=True)
df_rec['RANK_ORIGINAL'] = rec_rank['ORIGINAL']
df_rec['RANK_SMOTE'] = rec_rank['SMOTE']
df_rec['RANK_SMOTE_SVM'] = rec_rank['SMOTE_SVM']
df_rec['RANK_BORDERLINE1'] = rec_rank['BORDERLINE1']
df_rec['RANK_BORDERLINE2'] = rec_rank['BORDERLINE2']
df_rec['RANK_GEOMETRIC_SMOTE'] = rec_rank['GEOMETRIC_SMOTE']
df_rec['RANK_DELAUNAY'] = rec_rank['DELAUNAY']
df_spe = df_spe.reset_index()
df_spe.drop('index', axis=1, inplace=True)
df_spe['RANK_ORIGINAL'] = spe_rank['ORIGINAL']
df_spe['RANK_SMOTE'] = spe_rank['SMOTE']
df_spe['RANK_SMOTE_SVM'] = spe_rank['SMOTE_SVM']
df_spe['RANK_BORDERLINE1'] = spe_rank['BORDERLINE1']
df_spe['RANK_BORDERLINE2'] = spe_rank['BORDERLINE2']
df_spe['RANK_GEOMETRIC_SMOTE'] = spe_rank['GEOMETRIC_SMOTE']
df_spe['RANK_DELAUNAY'] = spe_rank['DELAUNAY']
df_f1 = df_f1.reset_index()
df_f1.drop('index', axis=1, inplace=True)
df_f1['RANK_ORIGINAL'] = f1_rank['ORIGINAL']
df_f1['RANK_SMOTE'] = f1_rank['SMOTE']
df_f1['RANK_SMOTE_SVM'] = f1_rank['SMOTE_SVM']
df_f1['RANK_BORDERLINE1'] = f1_rank['BORDERLINE1']
df_f1['RANK_BORDERLINE2'] = f1_rank['BORDERLINE2']
df_f1['RANK_GEOMETRIC_SMOTE'] = f1_rank['GEOMETRIC_SMOTE']
df_f1['RANK_DELAUNAY'] = f1_rank['DELAUNAY']
df_geo = df_geo.reset_index()
df_geo.drop('index', axis=1, inplace=True)
df_geo['RANK_ORIGINAL'] = geo_rank['ORIGINAL']
df_geo['RANK_SMOTE'] = geo_rank['SMOTE']
df_geo['RANK_SMOTE_SVM'] = geo_rank['SMOTE_SVM']
df_geo['RANK_BORDERLINE1'] = geo_rank['BORDERLINE1']
df_geo['RANK_BORDERLINE2'] = geo_rank['BORDERLINE2']
df_geo['RANK_GEOMETRIC_SMOTE'] = geo_rank['GEOMETRIC_SMOTE']
df_geo['RANK_DELAUNAY'] = geo_rank['DELAUNAY']
df_iba = df_iba.reset_index()
df_iba.drop('index', axis=1, inplace=True)
df_iba['RANK_ORIGINAL'] = iba_rank['ORIGINAL']
df_iba['RANK_SMOTE'] = iba_rank['SMOTE']
df_iba['RANK_SMOTE_SVM'] = iba_rank['SMOTE_SVM']
df_iba['RANK_BORDERLINE1'] = iba_rank['BORDERLINE1']
df_iba['RANK_BORDERLINE2'] = iba_rank['BORDERLINE2']
df_iba['RANK_GEOMETRIC_SMOTE'] = iba_rank['GEOMETRIC_SMOTE']
df_iba['RANK_DELAUNAY'] = iba_rank['DELAUNAY']
df_auc = df_auc.reset_index()
df_auc.drop('index', axis=1, inplace=True)
df_auc['RANK_ORIGINAL'] = auc_rank['ORIGINAL']
df_auc['RANK_SMOTE'] = auc_rank['SMOTE']
df_auc['RANK_SMOTE_SVM'] = auc_rank['SMOTE_SVM']
df_auc['RANK_BORDERLINE1'] = auc_rank['BORDERLINE1']
df_auc['RANK_BORDERLINE2'] = auc_rank['BORDERLINE2']
df_auc['RANK_GEOMETRIC_SMOTE'] = auc_rank['GEOMETRIC_SMOTE']
df_auc['RANK_DELAUNAY'] = auc_rank['DELAUNAY']
            # average rank
media_pre_rank = pre_rank.mean(axis=0)
media_rec_rank = rec_rank.mean(axis=0)
media_spe_rank = spe_rank.mean(axis=0)
media_f1_rank = f1_rank.mean(axis=0)
media_geo_rank = geo_rank.mean(axis=0)
media_iba_rank = iba_rank.mean(axis=0)
media_auc_rank = auc_rank.mean(axis=0)
media_pre_rank_file = media_pre_rank.reset_index()
media_pre_rank_file = media_pre_rank_file.sort_values(by=0)
media_rec_rank_file = media_rec_rank.reset_index()
media_rec_rank_file = media_rec_rank_file.sort_values(by=0)
media_spe_rank_file = media_spe_rank.reset_index()
media_spe_rank_file = media_spe_rank_file.sort_values(by=0)
media_f1_rank_file = media_f1_rank.reset_index()
media_f1_rank_file = media_f1_rank_file.sort_values(by=0)
media_geo_rank_file = media_geo_rank.reset_index()
media_geo_rank_file = media_geo_rank_file.sort_values(by=0)
media_iba_rank_file = media_iba_rank.reset_index()
media_iba_rank_file = media_iba_rank_file.sort_values(by=0)
media_auc_rank_file = media_auc_rank.reset_index()
media_auc_rank_file = media_auc_rank_file.sort_values(by=0)
            # save the main output files
df_pre.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
df_rec.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
df_spe.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
df_f1.to_csv(wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
df_geo.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
df_iba.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
df_auc.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
media_pre_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
media_rec_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
media_spe_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
media_f1_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
media_geo_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
media_iba_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
media_auc_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
delaunay_type = order + '_' + str(alpha)
            # CD (critical difference) diagram
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
delaunay_type]
avranks = list(media_pre_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_pre.pdf')
plt.close()
avranks = list(media_rec_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_rec.pdf')
plt.close()
avranks = list(media_spe_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_spe.pdf')
plt.close()
avranks = list(media_f1_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_f1.pdf')
plt.close()
avranks = list(media_geo_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_geo.pdf')
plt.close()
avranks = list(media_iba_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_iba.pdf')
plt.close()
'''avranks = list(media_auc_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_auc.pdf')
plt.close()'''
print('Delaunay Type= ', delaunay_type)
print('Algorithm= ', name)
def rank_total_by_algorithm(self, tipo, wd, reducao, order, alpha):
delaunay_name = 'RANK_DTO_' + str(order) + '_' + str(alpha)
cols = ['ALGORITHM', 'RANK_ORIGINAL', 'RANK_SMOTE', 'RANK_SMOTE_SVM', 'RANK_BORDERLINE1',
'RANK_BORDERLINE2', 'RANK_GEOMETRIC_SMOTE', 'RANK_DELAUNAY']
for name in classifiers_list:
print(os.path.abspath(os.getcwd()))
            # read the per-metric rank files saved earlier
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_pre.csv'
df_pre = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_rec.csv'
df_rec = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_spe.csv'
df_spe = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_f1.csv'
df_f1 = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_geo.csv'
df_geo = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_iba.csv'
df_iba = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_auc.csv'
df_auc = pd.read_csv(path_name)
# PRE
df_pre_col = df_pre[cols]
df_pre_col.loc[:, delaunay_name] = df_pre_col['RANK_DELAUNAY'].values
df_pre_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_pre = df_pre_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv'
ranking_pre['ALGORITHM'] = name
ranking_pre.to_csv(path_name, index=False)
# REC
df_rec_col = df_rec[cols]
df_rec_col.loc[:, delaunay_name] = df_rec_col['RANK_DELAUNAY'].values
df_rec_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_rec = df_rec_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv'
ranking_rec['ALGORITHM'] = name
ranking_rec.to_csv(path_name, index=False)
# SPE
df_spe_col = df_spe[cols]
df_spe_col.loc[:, delaunay_name] = df_spe_col['RANK_DELAUNAY'].values
df_spe_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_spe = df_spe_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv'
ranking_spe['ALGORITHM'] = name
ranking_spe.to_csv(path_name, index=False)
# F1
df_f1_col = df_f1[cols]
df_f1_col.loc[:, delaunay_name] = df_f1_col['RANK_DELAUNAY'].values
df_f1_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_f1 = df_f1_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv'
ranking_f1['ALGORITHM'] = name
ranking_f1.to_csv(path_name, index=False)
# GEO
df_geo_col = df_geo[cols]
df_geo_col.loc[:, delaunay_name] = df_geo_col['RANK_DELAUNAY'].values
df_geo_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_geo = df_geo_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv'
ranking_geo['ALGORITHM'] = name
ranking_geo.to_csv(path_name, index=False)
# IBA
df_iba_col = df_iba[cols]
df_iba_col.loc[:, delaunay_name] = df_iba_col['RANK_DELAUNAY'].values
df_iba_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_iba = df_iba_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv'
ranking_iba['ALGORITHM'] = name
ranking_iba.to_csv(path_name, index=False)
# AUC
df_auc_col = df_auc[cols]
df_auc_col.loc[:, delaunay_name] = df_auc_col['RANK_DELAUNAY'].values
df_auc_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_auc = df_auc_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv'
ranking_auc['ALGORITHM'] = name
ranking_auc.to_csv(path_name, index=False)
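    # Usage sketch (labels, directory and alpha are assumptions; they mirror the
    # arguments passed to rank_by_algorithm above):
    #   st.rank_total_by_algorithm('biclasse', output_dir + 'total_rank/', 'pca', 'area', 7.0)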
def rank_by_algorithm_dataset(self, filename):
df = pd.read_csv(filename)
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df_temp1 = group.groupby(by=['DATASET'])
for name1, group1 in df_temp1:
group1 = group1.reset_index()
group1.drop('index', axis=1, inplace=True)
group1['rank_f1'] = group1['F1'].rank(ascending=False)
group1['rank_geo'] = group1['GEO'].rank(ascending=False)
group1['rank_iba'] = group1['IBA'].rank(ascending=False)
group1['rank_auc'] = group1['AUC'].rank(ascending=False)
group1.to_csv('./../output_dir/rank/rank_algorithm_dataset_' + name + '_' + name1 + '.csv', index=False)
def rank_by_algorithm_dataset_only_dto(self, filename):
df = pd.read_csv(filename)
df = df[df['PREPROC'] != '_SMOTE']
df = df[df['PREPROC'] != '_Geometric_SMOTE']
df = df[df['PREPROC'] != '_Borderline1']
df = df[df['PREPROC'] != '_Borderline2']
df = df[df['PREPROC'] != '_smoteSVM']
df = df[df['PREPROC'] != '_train']
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df_temp1 = group.groupby(by=['DATASET'])
for name1, group1 in df_temp1:
group1 = group1.reset_index()
group1.drop('index', axis=1, inplace=True)
group1['rank_f1'] = group1['F1'].rank(ascending=False)
group1['rank_geo'] = group1['GEO'].rank(ascending=False)
group1['rank_iba'] = group1['IBA'].rank(ascending=False)
group1['rank_auc'] = group1['AUC'].rank(ascending=False)
group1.to_csv(
'./../output_dir/rank/only_dto/rank_algorithm_dataset_only_dto_' + name + '_' + name1 + '.csv',
index=False)
df_graph = group1.copy()
df_graph = df_graph.replace('area', 1)
df_graph = df_graph.replace('volume', 2)
df_graph = df_graph.replace('area_volume_ratio', 3)
df_graph = df_graph.replace('edge_ratio', 4)
df_graph = df_graph.replace('radius_ratio', 5)
df_graph = df_graph.replace('aspect_ratio', 6)
df_graph = df_graph.replace('max_solid_angle', 7)
df_graph = df_graph.replace('min_solid_angle', 8)
df_graph = df_graph.replace('solid_angle', 9)
legend = ['area', 'volume', 'area_volume_ratio', 'edge_ratio', 'radius_ratio', 'aspect_ratio',
'max_solid_angle', 'min_solid_angle', 'solid_angle']
x = df_graph['ORDER'].values
y = df_graph['ALPHA'].values.astype(float)
dz = df_graph['AUC'].values
N = x.shape[0]
z = np.zeros(N)
dx = 0.2 * np.ones(N)
dy = 0.2 * np.ones(N)
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(111, projection='3d')
cs = ['r', 'g', 'b'] * 9
ax1.bar3d(x, y, z, dx, dy, dz, color=cs)
ax1.set_ylabel('Alpha')
ax1.set_xlabel('\n\n\n\n\nGeometry')
ax1.set_zlabel('AUC')
ax1.set_title('Geometry x Alpha \n Algorithm = ' + name + '\n Dataset = ' + name1)
ax1.set_xticklabels(legend)
ax1.legend()
plt.show()
fig = plt.figure(figsize=(12, 8))
ax = Axes3D(fig)
                surf = ax.plot_trisurf(y, x, dz, cmap=cm.jet, linewidth=0.5)
fig.colorbar(surf, shrink=0.5, aspect=7)
ax.set_xlabel('Alpha')
ax.set_ylabel('\n\n\n\n\nGeometry')
ax.set_zlabel('AUC')
ax.set_title('Geometry x Alpha \n Algorithm = ' + name + '\n Dataset = ' + name1)
ax.set_yticklabels(legend)
ax.legend()
plt.savefig('./../output_dir/rank/only_dto/only_dto_geometry_by_alpha_' + name + '_' + name1 + '.pdf')
plt.show()
def rank_by_measures_only_dto(self, filename):
best_geometry = pd.DataFrame(columns=['PREPROC', 'M', 'ALGORITHM', 'MEDIA_RANK'])
df = pd.read_csv(filename)
df = df[df['PREPROC'] != '_SMOTE']
df = df[df['PREPROC'] != '_Geometric_SMOTE']
df = df[df['PREPROC'] != '_Borderline1']
df = df[df['PREPROC'] != '_Borderline2']
df = df[df['PREPROC'] != '_smoteSVM']
df = df[df['PREPROC'] != '_train']
i = 0
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
group['rank_f1'] = group['F1'].rank(ascending=False)
group['rank_geo'] = group['GEO'].rank(ascending=False)
group['rank_iba'] = group['IBA'].rank(ascending=False)
group['rank_auc'] = group['AUC'].rank(ascending=False)
# AUC
group = group.sort_values(by=['rank_auc'])
media_rank_auc = group.groupby('PREPROC')['rank_auc'].mean()
df_media_rank_auc = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_AUC'])
df_media_rank_auc['PREPROC'] = media_rank_auc.index
df_media_rank_auc['MEDIA_RANK_AUC'] = media_rank_auc.values
df_media_rank_auc.sort_values(by=['MEDIA_RANK_AUC'], ascending=True, inplace=True)
df_media_rank_auc.reset_index(inplace=True)
df_media_rank_auc.drop('index', axis=1, inplace=True)
best_auc_geometry = df_media_rank_auc.loc[0]
# GEO
group = group.sort_values(by=['rank_geo'])
media_rank_geo = group.groupby('PREPROC')['rank_geo'].mean()
df_media_rank_geo = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_GEO'])
df_media_rank_geo['PREPROC'] = media_rank_geo.index
df_media_rank_geo['MEDIA_RANK_GEO'] = media_rank_geo.values
df_media_rank_geo.sort_values(by=['MEDIA_RANK_GEO'], ascending=True, inplace=True)
df_media_rank_geo.reset_index(inplace=True)
df_media_rank_geo.drop('index', axis=1, inplace=True)
best_geo_geometry = df_media_rank_geo.loc[0]
# IBA
group = group.sort_values(by=['rank_iba'])
media_rank_iba = group.groupby('PREPROC')['rank_iba'].mean()
df_media_rank_iba = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_IBA'])
df_media_rank_iba['PREPROC'] = media_rank_iba.index
df_media_rank_iba['MEDIA_RANK_IBA'] = media_rank_iba.values
df_media_rank_iba.sort_values(by=['MEDIA_RANK_IBA'], ascending=True, inplace=True)
df_media_rank_iba.reset_index(inplace=True)
df_media_rank_iba.drop('index', axis=1, inplace=True)
best_iba_geometry = df_media_rank_iba.loc[0]
# F1
group = group.sort_values(by=['rank_f1'])
media_rank_f1 = group.groupby('PREPROC')['rank_f1'].mean()
            df_media_rank_f1 = pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_F1'])
import numpy as np
import pandas as pd
import tables
from phildb.constants import MISSING_VALUE, METADATA_MISSING_VALUE
class TabDesc(tables.IsDescription):
time = tables.Int64Col(dflt=0, pos=0)
value = tables.Float64Col(dflt=np.nan, pos=1)
meta = tables.Int32Col(dflt=0, pos=2)
replacement_time = tables.Int64Col(dflt=0, pos=3)
class LogHandler:
"""
"""
FILTERS = tables.Filters(complib="zlib", complevel=9)
def __init__(self, filename, mode):
self.hdf5 = tables.open_file(filename, mode, filters=self.FILTERS)
def create_skeleton(self):
"""
Create the skeleton of the log self.hdf5.
"""
data_group = self.hdf5.create_group("/", "data", "data group")
try:
new_table = self.hdf5.create_table(data_group, "log", TabDesc)
except tables.exceptions.NodeError as e:
pass
self.hdf5.flush()
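    # Usage sketch (hypothetical file name; times are stored as epoch seconds,
    # matching the integer comparison used in read() below):
    #   log = LogHandler('ts.hdf5', 'w')
    #   log.create_skeleton()
    #   df = log.read(as_at_datetime=1500000000)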
def read(self, as_at_datetime):
field_names = ["time", "value", "meta", "replacement_time"]
ts_table = self.hdf5.get_node("/data/log")
records = ts_table.read_where("replacement_time <= {0}".format(as_at_datetime))
if len(records) == 0:
return pd.DataFrame(None, columns=field_names)
df = pd.DataFrame(records, columns=field_names)
df["date"] = | pd.to_datetime(df["time"], unit="s") | pandas.to_datetime |
from rdkit import Chem
from .utils import *
from rdkit.Chem.MolStandardize import rdMolStandardize
import os
import pandas as pd
#==========================================================
# process SMILES of chemotypes
def normChemotypes(compounds,
getChemotypes=False,
getChemotypesIdx=False,
normalizeChemotypes=False,
printlogs=True):
#------------------------
if getChemotypesIdx:
if getChemotypes == False:
print("!!!ERROR: 'getChemotypesIdx=True' argument goes with 'getChemotypes=True'!!!")
return None
#------------------------
if isinstance(compounds, pd.core.series.Series):
compounds = compounds.tolist()
if isinstance(compounds, pd.core.frame.DataFrame):
compounds = compounds.iloc[:,0].tolist()
if isinstance(compounds, str):
compounds = [compounds]
if isinstance(compounds, list):
compounds = compounds
#------------------------
compounds_ = molStructVerify(compounds, printlogs=False)
Unverified_count = len(molStructVerify(compounds, getFailedStruct=True, printlogs=False))
NonChemotypesList, ChemotypesList, ChemotypesIdxList = [], [], []
Chemotype_count = 0
idx = 0
for compound in compounds_:
Premol = Chem.MolFromSmiles(compound)
canonicalized_mol = rdMolStandardize.Normalize(Premol)
canonicalized_SMILES = Chem.MolToSmiles(canonicalized_mol)
if canonicalized_SMILES == compound:
NonChemotypesList.append(canonicalized_SMILES)
else:
Chemotype_count +=1
if normalizeChemotypes:
NonChemotypesList.append(canonicalized_SMILES)
ChemotypesList.append(compound)
ChemotypesIdxList.append(idx)
else:
ChemotypesList.append(compound)
ChemotypesIdxList.append(idx)
idx += 1
if printlogs:
print("=======================================================")
if Unverified_count > 0:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
print("Failed to verify {}/{} structures".format(Unverified_count, len(compounds)))
print("Use function 'utils.molValidate' and set 'getFailedStruct=True' to get the list of unverified structures")
if Chemotype_count > 0:
if normalizeChemotypes:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList)-Chemotype_count, len(compounds)))
print("{}/{} structure(s) is/are chemotype(s) BUT was/were normalized".format(Chemotype_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Chemotype normalization has been applied!!!!!")
else:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList), len(compounds)))
print("{}/{} structure(s) is/are chemotype(s) BUT was/were NOT normalized".format(Chemotype_count, len(compounds)))
else:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList), len(compounds)))
else:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
if Chemotype_count > 0:
if normalizeChemotypes:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList)-Chemotype_count, len(compounds)))
print("{}/{} structure(s) is/are chemotype(s) BUT was/were normalized".format(Chemotype_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Chemotype normalization has been applied!!!!!")
else:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList), len(compounds)))
print("{}/{} structure(s) is/are chemotype(s) BUT was/were NOT normalized".format(Chemotype_count, len(compounds)))
else:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList), len(compounds)))
print("=======================================================")
if getChemotypes:
if getChemotypesIdx:
return ChemotypesList, ChemotypesIdxList
else:
return ChemotypesList
else:
return NonChemotypesList
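# Usage sketch (hypothetical SMILES; a structure whose SMILES changes under
# rdMolStandardize.Normalize, e.g. a nitro group written as N(=O)=O, should be
# reported as a chemotype):
#   cleaned = normChemotypes(['CCO', 'CCN(=O)=O'], normalizeChemotypes=True, printlogs=False)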
#==========================================================
# process SMILES of tautomers
def normTautomers(compounds,
getTautomers=False,
getTautomersIdx=False,
deTautomerize=False,
printlogs=True):
#------------------------
if getTautomersIdx:
if getTautomers == False:
print("!!!ERROR: 'getTautomersIdx=True' argument goes with 'getTautomers=True'!!!")
return None
#------------------------
if isinstance(compounds, pd.core.series.Series):
compounds = compounds.tolist()
if isinstance(compounds, pd.core.frame.DataFrame):
compounds = compounds.iloc[:,0].tolist()
if isinstance(compounds, str):
compounds = [compounds]
if isinstance(compounds, list):
compounds = compounds
#------------------------
compounds_ = molStructVerify(compounds, printlogs=False)
Unverified_count = len(molStructVerify(compounds, getFailedStruct=True, printlogs=False))
NonTautomersList, TautomersList, TautomersIdxList = [], [], []
enumerator = rdMolStandardize.TautomerEnumerator()
Tautomer_count = 0
idx = 0
for compound in compounds_:
Premol = Chem.MolFromSmiles(compound)
canonicalized_mol = enumerator.Canonicalize(Premol)
canonicalized_SMILES = Chem.MolToSmiles(canonicalized_mol)
if canonicalized_SMILES == compound:
NonTautomersList.append(canonicalized_SMILES)
else:
Tautomer_count +=1
if deTautomerize:
NonTautomersList.append(canonicalized_SMILES)
TautomersList.append(compound)
TautomersIdxList.append(idx)
else:
TautomersList.append(compound)
TautomersIdxList.append(idx)
idx += 1
if printlogs:
print("=======================================================")
if Unverified_count > 0:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
print("Failed to verify {}/{} structures".format(Unverified_count, len(compounds)))
print("Use function 'utils.molValidate' and set 'getFailedStruct=True' to get the list of unverified structures")
if Tautomer_count > 0:
if deTautomerize:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList)-Tautomer_count, len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were detautomerized".format(Tautomer_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Detautomerizing has been applied!!!!!")
else:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList), len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were NOT detautomerized".format(Tautomer_count, len(compounds)))
else:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList), len(compounds)))
else:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
if Tautomer_count > 0:
if deTautomerize:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList)-Tautomer_count, len(compounds)))
print("{}/{} structure(s) í/are tautomer(s) BUT was/were detautomerized".format(Tautomer_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Detautomerizing has been applied!!!!!")
else:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList), len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were NOT detautomerized".format(Tautomer_count, len(compounds)))
else:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList), len(compounds)))
print("=======================================================")
if getTautomers:
if getTautomersIdx:
return TautomersList, TautomersIdxList
else:
return TautomersList
else:
return NonTautomersList
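# Hedged usage sketch (added for illustration, not part of the original API):
# typical call patterns for normTautomers. The SMILES strings below are
# hypothetical examples; RDKit is assumed available, as for the rest of this module.
def _example_norm_tautomers():
    smiles = ["Oc1ccccn1", "O=c1cccc[nH]1"]  # two tautomeric forms of 2-pyridone
    # Canonicalize tautomers to a single representative form
    canonical = normTautomers(smiles, deTautomerize=True, printlogs=False)
    # Retrieve detected tautomers together with their positions in the input
    tautomers, taut_idx = normTautomers(smiles, getTautomers=True,
                                        getTautomersIdx=True, printlogs=False)
    return canonical, tautomers, taut_idx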
#==========================================================
# process SMILES of stereoisomers
def normStereoisomers(compounds,
getStereoisomers=False,
getStereoisomersIdx=False,
deSterioisomerize=False,
printlogs=True):
#------------------------
if getStereoisomersIdx:
if getStereoisomers == False:
print("!!!ERROR: 'getStereoisomersIdx=True' argument goes with 'getStereoisomers=True'!!!")
return None
#------------------------
if isinstance(compounds, pd.core.series.Series):
compounds = compounds.tolist()
if isinstance(compounds, pd.core.frame.DataFrame):
compounds = compounds.iloc[:,0].tolist()
if isinstance(compounds, str):
compounds = [compounds]
if isinstance(compounds, list):
compounds = compounds
#------------------------
compounds_ = molStructVerify(compounds, printlogs=False)
Unverified_count = len(molStructVerify(compounds, getFailedStruct=True, printlogs=False))
NonStereoisomersList, StereoisomersList, StereoisomersIdxList = [], [], []
Stereoisomer_count = 0
idx = 0
for compound in compounds_:
if compound.find("@") == -1 and compound.find("/") == -1 and compound.find("\\") == -1:
NonStereoisomersList.append(compound)
else:
Stereoisomer_count +=1
if deSterioisomerize:
NonStereoisomersList.append(compound.replace("@", "").replace("/","").replace("\\",""))
StereoisomersList.append(compound)
StereoisomersIdxList.append(idx)
else:
StereoisomersList.append(compound)
StereoisomersIdxList.append(idx)
idx += 1
if printlogs:
print("=======================================================")
if Unverified_count > 0:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
print("Failed to verify {}/{} structures".format(Unverified_count, len(compounds)))
print("Use function 'utils.molValidate' and set 'getFailedStruct=True' to get the list of unverified structures")
if Stereoisomer_count > 0:
if deSterioisomerize:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList)-Stereoisomer_count, len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were destereoisomerized".format(Stereoisomer_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Destereoisomerization has been applied!!!!!")
else:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList), len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were NOT destereoisomerized".format(Stereoisomer_count, len(compounds)))
else:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList), len(compounds)))
else:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
if Stereoisomer_count > 0:
if deSterioisomerize:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList)-Stereoisomer_count, len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were destereoisomerized".format(Stereoisomer_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Destereoisomerization has been applied!!!!!")
else:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList), len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were NOT destereoisomerized".format(Stereoisomer_count, len(compounds)))
else:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList), len(compounds)))
print("=======================================================")
if getStereoisomers:
if getStereoisomersIdx:
return StereoisomersList, StereoisomersIdxList
else:
return StereoisomersList
else:
return NonStereoisomersList
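# Hedged usage sketch (illustrative only): strips stereochemistry marks with
# normStereoisomers. The SMILES string is a hypothetical example; note the
# keyword keeps the module's original spelling 'deSterioisomerize'.
def _example_norm_stereoisomers():
    smiles = ["C/C=C/C[C@H](O)C"]  # carries both double-bond and tetrahedral stereo marks
    flattened = normStereoisomers(smiles, deSterioisomerize=True, printlogs=False)
    stereo, stereo_idx = normStereoisomers(smiles, getStereoisomers=True,
                                           getStereoisomersIdx=True, printlogs=False)
    return flattened, stereo, stereo_idx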
#==========================================================
# Complete normalization
def normalizeComplete(compounds,
getUnnormalizedStruct=False,
normalizeChemotypes=True,
deTautomerize=True,
deSterioisomerize=True,
removeDuplicates=False,
getDuplicatedIdx=False,
exportCSV=False,
outputPath=None,
printlogs=True):
#------------------------
if getUnnormalizedStruct:
if removeDuplicates:
print("!!!ERROR: 'removeDuplicates=True' argument goes with 'getUnnormalizedStruct=False' only !!!")
return None
if getDuplicatedIdx:
print("!!!ERROR: 'getDuplicatedIdx=True' argument goes with 'getUnnormalizedStruct=False' only !!!")
if getDuplicatedIdx:
if removeDuplicates == False:
print("!!!ERROR: 'getDuplicatedIdx=True' argument goes with 'removeDuplicates=True'!!!")
return None
if exportCSV:
if outputPath == None:
print("!!!ERROR 'exportCSV=True' needs 'outputPath=<Directory>' to be filled !!!")
return None
if outputPath:
if exportCSV == False:
print("!!!ERROR 'outputPath=<Directory>' needs to set 'exportCSV=True' !!!")
return None
#------------------------
if isinstance(compounds, pd.core.series.Series):
compounds = compounds.tolist()
if isinstance(compounds, pd.core.frame.DataFrame):
compounds = compounds.iloc[:,0].tolist()
if isinstance(compounds, str):
compounds = [compounds]
if isinstance(compounds, list):
compounds = compounds
#------------------------
compounds_r1 = molStructVerify(compounds, printlogs=False)
UnverifiedList, UnverifiedIdxList = molStructVerify(compounds, getFailedStruct=True, getFailedStructIdx=True, printlogs=False)
Unverified_count = len(UnverifiedList)
#------------------------
if deSterioisomerize:
compounds_r2 = normStereoisomers(compounds_r1, deSterioisomerize=True, printlogs=False)
else:
compounds_r2 = normStereoisomers(compounds_r1, deSterioisomerize=False, printlogs=False)
StereoisomersList, StereoisomersIdxList = normStereoisomers(compounds_r1, getStereoisomers=True, getStereoisomersIdx=True, printlogs=False)
Stereoisomers_count = len(StereoisomersList)
#------------------------
if normalizeChemotypes:
compounds_r3 = normChemotypes(compounds_r2, normalizeChemotypes=True, printlogs=False)
else:
compounds_r3 = normChemotypes(compounds_r2, normalizeChemotypes=False, printlogs=False)
ChemotypesList, ChemotypesIdxList = normChemotypes(compounds_r1, getChemotypes=True, getChemotypesIdx=True, printlogs=False)
Chemotypes_count = len(ChemotypesList)
#------------------------
if deTautomerize:
compounds_r4 = normTautomers(compounds_r3, deTautomerize=True, printlogs=False)
else:
compounds_r4 = normTautomers(compounds_r3, deTautomerize=False, printlogs=False)
TautomersList, TautomersIdxList = normTautomers(compounds_r1, getTautomers=True, getTautomersIdx=True, printlogs=False)
Tautomers_count = len(TautomersList)
#------------------------
if printlogs:
if Unverified_count > 0:
print("=======================================================")
print("Succeeded to verify {}/{} structures".format(len(compounds_r1), len(compounds)))
print("Failed to verify {} structures \n".format(Unverified_count))
else:
print("=======================================================")
print("Succeeded to validate {}/{} structures \n".format(len(compounds_r1), len(compounds)))
if Stereoisomers_count > 0:
if deSterioisomerize:
print("=======================================================")
print("{}/{} structures are NOT stereoisomers".format(len(compounds_r2)-Stereoisomers_count, len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were destereoisomerized \n".format(Stereoisomers_count, len(compounds)))
else:
print("=======================================================")
print("{}/{} structures are NOT stereoisomers".format(len(compounds_r2), len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were NOT destereoisomerized \n".format(Stereoisomers_count, len(compounds)))
else:
print("=======================================================")
print("{}/{} structures are NOT stereoisomers".format(len(compounds_r2), len(compounds)))
if Chemotypes_count > 0:
if normalizeChemotypes:
print("=======================================================")
compounds_r3_ = normChemotypes(compounds_r2, normalizeChemotypes=True, printlogs=False)
print("{}/{} structures are NOT chemotypes".format(len(compounds_r3_)-Chemotypes_count, len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were normalized \n".format(Chemotypes_count, len(compounds)))
else:
print("=======================================================")
compounds_r3_ = normChemotypes(compounds_r2, normalizeChemotypes=False, printlogs=False)
print("{}/{} structures are NOT chemotypes".format(len(compounds_r3_), len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) but was/were NOT normalized \n".format(Chemotypes_count, len(compounds)))
else:
print("=======================================================")
print("{}/{} structures are NOT chemotypes \n".format(len(compounds_r2), len(compounds)))
if Tautomers_count > 0:
if deTautomerize:
print("=======================================================")
compounds_r4_ = normTautomers(compounds_r2, deTautomerize=True, printlogs=False)
print("{}/{} structures are NOT tautomers".format(len(compounds_r4_)-Tautomers_count, len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were detautomerized \n".format(Tautomers_count, len(compounds)))
else:
print("=======================================================")
compounds_r4_ = normTautomers(compounds_r2, deTautomerize=False, printlogs=False)
print("{}/{} structures are NOT tautomers".format(len(compounds_r4_), len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) but was/were NOT detautomerized \n".format(Tautomers_count, len(compounds)))
else:
print("=======================================================")
print("{}/{} structures are NOT tautomers \n".format(len(compounds_r2), len(compounds)))
#------------------------
NormalizedList = compounds_r4
    UnNormalizedList = UnverifiedList + ChemotypesList + TautomersList + StereoisomersList
UnNormalizedLabel = len(UnverifiedList)*["UnverifiedStruct"] + len(ChemotypesList)*["Chemotype"] + len(TautomersList)*["Tautomer"] + len(StereoisomersList)*["Stereoisomer"]
FunctionLabel = len(UnverifiedList)*["molStructVerify()"] + len(ChemotypesList)*["normChemotypes()"] + len(TautomersList)*["normTautomers()"] + len(StereoisomersList)*["normStereoisomers()"]
IdxLabel = UnverifiedIdxList + ChemotypesIdxList + TautomersIdxList + StereoisomersIdxList
df1 = pd.DataFrame(zip(UnNormalizedList, UnNormalizedLabel, FunctionLabel, IdxLabel), columns=['SMILES', 'errorTag', 'fromFunction', 'idx'])
#------------------------
if printlogs:
print("=======================================================")
print("SUMMARY:")
if len(UnverifiedList) > 0:
print("{}/{} structures were successfully verfied".format(len(compounds_r1), len(compounds)))
print("{}/{} structure(s) was/were unsuccessfully verfied and need to be rechecked".format(len(UnverifiedList), len(compounds)))
else:
print("{}/{} structure were successfully verfied".format(len(compounds_r1), len(compounds)))
if len(UnNormalizedList) > 0:
print("{}/{} structures were successfully normalized".format(len(NormalizedList), len(compounds)))
if len(compounds_r1) > len(NormalizedList):
print("{}/{} structure(s) was/were unsuccessfully normalized and need to be rechecked".format(len(compounds_r1)-len(NormalizedList), len(compounds)))
print("=======================================================")
else:
print("{}/{} structures were successfully normalized".format(len(NormalizedList), len(compounds)))
print("-------------------------------------------------------")
if len(UnNormalizedList) > 0:
if getUnnormalizedStruct == False:
print("set 'getUnnormalizedStruct=True' to get the list of all unnormalized structures. \n")
#------------------------
if getUnnormalizedStruct:
if exportCSV:
filePath = outputPath + "UnnormalizedList.csv"
if os.path.isdir(outputPath):
df1.to_csv(filePath, index=False)
else:
os.makedirs(outputPath)
df1.to_csv(filePath, index=False)
else:
return df1
else:
if removeDuplicates:
DeduplicatedNormalizedList = molDeduplicate(NormalizedList, printlogs=False)
_, DuplicatedNormalizedIdxList = molDeduplicate(NormalizedList, getDuplicates=True, getDuplicatesIdx=True, printlogs=False)
if len(DeduplicatedNormalizedList) == len(NormalizedList):
if printlogs:
print("No duplicate was found (in {} normalized structures)".format(len(NormalizedList)))
if getDuplicatedIdx:
df0 = pd.DataFrame(DuplicatedNormalizedIdxList)
df0.columns = ['idx', 'matchedIdx']
df0['fromFunction'] = 'normalizeComplete()'
if exportCSV:
filePath = outputPath + "DuplicatedNormalizedIdxList.csv"
if os.path.isdir(outputPath):
df0.to_csv(filePath, index=False)
else:
os.makedirs(outputPath)
df0.to_csv(filePath, index=False)
else:
return df0
else:
df0 = pd.DataFrame(DeduplicatedNormalizedList)
df0.columns = ['SMILES']
if exportCSV:
filePath = outputPath + "NoDuplicatedNormalizedIdxList.csv"
if os.path.isdir(outputPath):
df0.to_csv(filePath, index=False)
else:
os.makedirs(outputPath)
df0.to_csv(filePath, index=False)
else:
return df0
else:
if printlogs:
print("=============================================================================")
print("There are {} unique structures filtered from {} initial normalized structures".format(len(DeduplicatedNormalizedList), len(NormalizedList)))
print("=============================================================================")
print("To get detailed information, please follow steps below:")
print("(1) Rerun normalizeComplete() with setting 'removeDuplicates=False' to get the list of all normalized structures")
print("(2) Run ultils.molDeduplicate() with setting 'getDuplicates=True'to get the list of duplicated structures \n")
print("--OR--")
print("Rerun normalizeComplete() with setting 'getDuplicates=True', 'exportCSV'=True, and 'outputPath=<Directory>' to export a csv file containing the list of duplicated structures \n")
print("--OR--")
print("Run ultils.molDeduplicate() with settings 'getDuplicates=True' and 'getDuplicatesIndex=True' to get the list of duplicated structures with detailed indices")
if getDuplicatedIdx:
df2 = | pd.DataFrame(DuplicatedNormalizedIdxList) | pandas.DataFrame |
import pandas as pd
import json_parser
import trace_visualizer
import logging
import os.path
import plotly.graph_objects as go
def parse_k8s_kpis_as_dataframe(filename):
# Parses a KPI file consisting of several lines of raw KPIs as output by the following kubectl command
    # kubectl get --raw /apis/metrics.k8s.io/v1beta1/pods/ >> kpidump.log
d = json_parser.load_json_file(filename)
df = pd.DataFrame(data=d)
df = df.drop_duplicates(subset=['pod', 'container', 'namespace', 'timestamp'])
df['pod+container'] = df['pod'] + '+' + df['container']
# CPU is in full cores and memory in MB. Window in seconds
# pd.set_option('display.max_rows', 100)
# display(df)
# Group KPIs per namespace
data_to_plot = df.groupby(['timestamp', 'namespace']).sum().loc[:, ['cpu', 'memory']]
data_to_plot['pod count'] = df.groupby(['timestamp', 'namespace']).agg({"pod": "nunique"})
data_to_plot['container count'] = df.groupby(['timestamp', 'namespace']).agg({"pod+container": "nunique"})
data_to_plot = data_to_plot.reset_index()
return data_to_plot
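# Hedged usage sketch (illustrative only): aggregates a dump produced by the
# kubectl command documented above and plots per-namespace CPU over time.
# The file name is a hypothetical placeholder.
def _example_plot_namespace_cpu(kpi_dump_file='kpidump.log'):
    data_to_plot = parse_k8s_kpis_as_dataframe(kpi_dump_file)
    fig = go.Figure()
    for namespace, group in data_to_plot.groupby('namespace'):
        fig.add_trace(go.Scatter(x=group['timestamp'], y=group['cpu'],
                                 mode='lines', name=namespace))
    fig.update_layout(title='CPU per namespace (full cores)')
    return fig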
def import_pcap_as_dataframe(pcap_files, http2_ports, wireshark_version, logging_level=logging.INFO):
# Imports one or more pcap files as a dataframe with the packet parsing implemented in the trace_visualizer code
# Accept either a single path or a list or paths
if not type(pcap_files) is list:
pcap_files = [pcap_files]
current_verbosity_level = trace_visualizer.application_logger.level
try:
# Reduce verbosity
trace_visualizer.application_logger.setLevel(logging_level)
packets_df_list = []
if len(pcap_files) == 0:
return None
for idx, file in enumerate(pcap_files):
if os.path.exists(file):
pdml_file = trace_visualizer.call_wireshark(wireshark_version, file, http2_ports)
packet_descriptions = trace_visualizer.import_pdml(pdml_file, diagrams_to_output='raw')
packets_df = pd.DataFrame(packet_descriptions,
columns=['ip_src', 'ip_dst', 'frame_number', 'protocol', 'msg_description',
'timestamp', 'timestamp_offset'])
packets_df['datetime'] = pd.to_datetime(packets_df['timestamp'], unit='s')
packets_df['msg_description'] = packets_df['msg_description'].str.replace('\\n', '\n')
packets_df['summary_raw'] = [trace_visualizer.packet_to_str(p).protocol for p in packet_descriptions]
# Generate summary column
packets_df['summary'] = packets_df.apply(_generate_summary_row, axis=1)
packets_df['file'] = file
packets_df['file_idx'] = idx
packets_df_list.append(packets_df)
# Consolidated packet list
packets_df = | pd.concat(packets_df_list) | pandas.concat |
import os
import math
import warnings
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import shutil as sh
from glob import glob
from PIL import Image
from copy import copy
from tqdm import tqdm
from pathlib import Path
from datetime import datetime
import libs.dirs as dirs
import libs.commons as commons
import libs.utils as utils
from libs.index import IndexManager
from libs.get_frames_class import GetFramesFull
# User Input
def get_input_network_type(net_type_dict, message="network"):
'''
    Select between reference or semiauto network/dataset
'''
dictLen = len(net_type_dict)
print("\nEnter {} type code from list:\n".format(message))
print("Code\tName")
for i in range(dictLen):
print("{}:\t{}".format(i, net_type_dict[i]))
input_code = int(input())
if input_code < dictLen:
target_net = net_type_dict[input_code]
else:
target_net = "UNKNOWN"
while target_net not in net_type_dict.values():
input_code = input("Unknown network. Please select a network from the list.\n")
try:
input_code = int(input_code)
except ValueError:
continue
if input_code < dictLen:
target_net = net_type_dict[input_code]
return target_net
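# Hedged usage sketch (illustrative only, kept commented out because the function
# prompts for interactive input). The dict is a hypothetical code-to-name mapping.
# net_type_dict = {0: "reference", 1: "semiauto"}
# target_net = get_input_network_type(net_type_dict, message="dataset")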
def get_input_target_class(net_class_dict):
'''
Get user input of net target class. Applicable to rede3 only.
'''
classLen = len(net_class_dict)
print("Enter the target class code from list:\n")
print("Code\tClass name")
for i in range(classLen):
print("{}:\t{}".format(i, net_class_dict[i]))
input_class_code = int(input())
if input_class_code < classLen:
event_class = net_class_dict[input_class_code]
else:
event_class = "UNKNOWN"
while event_class not in net_class_dict.values():
input_class_code = input("Unknown class. Please select a class from the list.\n")
try:
input_class_code = int(input_class_code)
except ValueError:
continue
if input_class_code < classLen:
event_class = net_class_dict[input_class_code]
return event_class
# Reports and logging
def get_class_counts(index, class_column, pos_label, neg_label):
'''
Returns index class counts according to input labels.
index: pandas DataFrame
DataFrame with the elements to be counted.
pos_label, neg_label: any object or list
labels to be compared to elements of index[class_column]. If any neg_label
is None, the count of negative elements will be <Total index size> - <positive count>.
'''
if isinstance(pos_label, str) or not(hasattr(pos_label, "__iter__")):
pos_label = [pos_label]
if isinstance(neg_label, str) or not(hasattr(neg_label, "__iter__")):
neg_label = [neg_label]
posCount = 0
for label in pos_label:
posCount += index.groupby(class_column).get_group(label).count()[0]
negCount = 0
for label in neg_label:
if label is None:
negCount = index.shape[0] - posCount
break
# Else, count normally
negCount += index.groupby(class_column).get_group(label).count()[0]
return posCount, negCount
def get_net_class_counts(index_path, net, target_class=None):
'''
Chooses correct class labels to use in a get_class_counts function call
according to input net and target_class.
'''
assert Path(index_path).is_file(), "Index path does not exist."
index = remove_duplicates(pd.read_csv(index_path, low_memory=False), "FrameHash")
if (net == 3) and (target_class not in commons.rede3_classes.values()):
raise ValueError("Net 3 requires a valid target_class.")
if net == 1:
classColumn = "rede1"
posLabel = commons.rede1_positive
negLabel = commons.rede1_negative
mask = None
elif net ==2:
classColumn = "rede2"
posLabel = commons.rede2_positive
negLabel = commons.rede2_negative
mask = (index["rede1"] == commons.rede1_positive)
elif net == 3:
classColumn = "rede3"
posLabel = target_class
negLabel = None
mask = (index["rede2"] == commons.rede2_positive)
if mask is not None:
# Pass only relevant fraction of index to get_class_counts
index = index.loc[mask, :]
# Translate to binary classes
index[classColumn] = translate_labels(index[classColumn], classColumn)
return get_class_counts(index, classColumn, posLabel, negLabel)
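# Hedged usage sketch (illustrative only): counts positive/negative examples for
# rede 2 from a labeled index csv. The path below is a hypothetical placeholder.
def _example_net_class_counts(index_path="labeled_index.csv"):
    posCount, negCount = get_net_class_counts(index_path, 2)
    print("Positives: {}\tNegatives: {}".format(posCount, negCount))
    return posCount, negCount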
def save_seed_log(log_path, seed, id_string):
# Save sample seed
if Path(log_path).is_file():
f = open(log_path, 'a')
else:
f = open(log_path, 'w')
f.write("{}\n{}\n".format(id_string, seed))
f.close()
def get_loop_stats(loop_folder): # TODO: Finish function
statsDf = pd.DataFrame()
return statsDf
def make_report(report_path, sampled_path, manual_path, automatic_path, prev_unlabeled_path,
train_info, rede=1, target_class=None, show=False):
sampledIndex = pd.read_csv(sampled_path)
manualIndex = pd.read_csv(manual_path)
autoIndex = pd.read_csv(automatic_path)
prevUnlabelIndex = pd.read_csv(prev_unlabeled_path)
# Get report information
numUnlabel = prevUnlabelIndex.shape[0]
numSampled = sampledIndex.shape[0]
sampledNaoDuto = 0
if rede == 1:
sampledNaoDuto = sampledIndex.groupby("rede1").get_group("Confuso").count()[0]+\
sampledIndex.groupby("rede1").get_group("Nada").count()[0]
sampledDuto = sampledIndex.groupby("rede1").get_group(commons.rede1_positive).count()[0]
sampledNaoEvento = 0
sampledEvento = sampledIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0]
if rede < 3:
sampledNaoEvento = sampledIndex.groupby("rede2").get_group(commons.rede2_negative).count()[0]
sampledTotal = sampledDuto + sampledNaoDuto
naoDutoPercent = sampledNaoDuto/sampledTotal*100
dutoPercent = sampledDuto/sampledTotal*100
eventoPercent = sampledEvento/sampledTotal*100
naoEventoPercent = sampledNaoEvento/sampledTotal*100
if rede == 1:
negLabelName = commons.rede1_negative
posLabelName = commons.rede1_positive
cumNeg = manualIndex.groupby("rede1").get_group('Nada').count()[0]+\
manualIndex.groupby("rede1").get_group('Confuso').count()[0]
cumPos = manualIndex.groupby("rede1").get_group(commons.rede1_positive).count()[0]
# Exception for case where there are no positive or negative images automatically annotated
if commons.rede1_negative in set(autoIndex['rede1'].values):
autoNeg = autoIndex.groupby("rede1").get_group(commons.rede1_negative).count()['rede1']
else:
autoNeg = 0
if commons.rede1_positive in set(autoIndex['rede1'].values):
autoPos = autoIndex.groupby("rede1").get_group(commons.rede1_positive).count()['rede1']
else:
autoPos = 0
elif rede == 2:
negLabelName = commons.rede2_negative
posLabelName = commons.rede2_positive
cumNeg = manualIndex.groupby("rede2").get_group(commons.rede2_negative).count()[0]
cumPos = manualIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0]
# Exception for case where there are no positive or negative images automatically annotated
if commons.rede2_negative in set(autoIndex['rede2'].values):
autoNeg = autoIndex.groupby("rede2").get_group(commons.rede2_negative).count()['rede2']
else:
autoNeg = 0
if commons.rede2_positive in set(autoIndex['rede2'].values):
autoPos = autoIndex.groupby("rede2").get_group(commons.rede2_positive).count()['rede2']
else:
autoPos = 0
elif rede == 3:
negLabelName = "Nao"+target_class
posLabelName = target_class
sampledClassPos = sampledIndex.groupby("rede3").get_group(posLabelName).count()[0]
sampledClassNeg = sampledIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0] - sampledClassPos
sampledTotal = sampledIndex.shape[0]
sampleNegPercent = sampledClassNeg/sampledTotal*100
samplePosPercent = sampledClassPos/sampledTotal*100
cumPos = manualIndex.groupby("rede3").get_group(posLabelName).count()[0]
cumNeg = manualIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0] - cumPos
# Exception for case where there are no positive or negative images automatically annotated
if posLabelName in set(autoIndex['rede3'].values):
autoPos = autoIndex.groupby("rede3").get_group(posLabelName).count()['rede3']
else:
autoPos = 0
autoNeg = autoIndex.groupby("rede2").get_group(commons.rede2_positive).count()[0] - autoPos
cumTotal = cumPos + cumNeg
cumNegPercent = cumNeg/cumTotal*100
cumPosPercent = cumPos/cumTotal*100
autoLabel = autoIndex.shape[0]
autoLabelPercent = autoLabel/numUnlabel*100
# Compose manual image distribution string
distributionString = "Manual annotation distribution:\n"
if (rede == 1) or (rede == 2):
distributionString +=\
"NaoDuto: {} images ({:.2f} %)\n\
Duto: {} images ({:.2f} %)\n\
NaoEvento {} images ({:.2f} %)\n\
Evento: {} images ({:.2f} %)\n\
Total: {} images (100%)\n".format(sampledNaoDuto, naoDutoPercent, sampledDuto, dutoPercent,
sampledNaoEvento, naoEventoPercent, sampledEvento, eventoPercent,
sampledTotal)
if rede == 3:
distributionString +=\
"{}:\t{} images ({:.2f} %)\n\
{}:\t\t{} images ({:.2f} %)\n\
Total\t\t{} images (100 %)\n".format(posLabelName, sampledClassPos, samplePosPercent,
negLabelName, sampledClassNeg, sampleNegPercent,
sampledTotal)
# Assemble report string
reportString = "Rede{}.\n{} unlabeled images remain. Sampled {} images for manual annotation.\n".format(rede,
numUnlabel, numSampled)+\
distributionString+\
"Cumulative manual annotation distribution:\n\
{}: {} images ({:.2f} %)\n\
{}: {} images ({:.2f} %)\n\
Total: {} images (100%)\n".format(negLabelName, cumNeg, cumNegPercent,
posLabelName, cumPos, cumPosPercent, cumTotal)+\
"Train Hyperparams:\n\
Num Epochs: {}\n\
Batch Size: {}\n\
Optimizer: Adam\n\
Train Results:\n\
Elapsed Time: {}m\n\
Best val loss: {:.4f}\n\
Best val accuracy: {:.2f} %\n".format(1,2,3,4,5)+\
"Thresholds val (99% pos ratio):\n\
Upper 99% positive ratio: {:.4f}, {:.2f} % ground truth positives\n\
Lower 1% positive ratio: {:.4f}, {:.2f} % ground truth positives\n\
Validation: {}/{} = {:.2f} % images annotated\n\
Automatic Annotation:\n\
    Positive imgs: {}; Negative imgs: {}\n\
    {}/{} = {:.2f} % of images automatically annotated\n".format(1.,2.,3.,4.,5.,6.,7., autoPos, autoNeg,
autoLabel,numUnlabel, autoLabelPercent)
# TODO: Add train info
# Write report
# with open(report_path, 'w') as f:
# f.write(reportString)
utils.write_string(reportString, report_path, mode='w')
if show:
print(reportString)
return reportString
# Automatic labeling
def automatic_labeling(outputs, outputs_index, unlabeled_index, upper_thresh, lower_thresh, rede,
target_class=None, verbose=True):
'''
Return a DataFrame whose entries are taken from unlabeled_index according to calculated indexes.
The indexes are chosen so that their outputs are either above the upper threshold or below the lower.
'''
upperIndexes, lowerIndexes = get_auto_label_indexes(outputs, outputs_index, upper_thresh,
lower_thresh, verbose=True)
autoIndex = get_classified_index(unlabeled_index, upperIndexes, lowerIndexes, rede,
index_col="FrameHash", target_class=target_class, verbose=False)
return autoIndex
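# Hedged usage sketch (illustrative only): ties compute_thresholds (defined further
# below) to automatic_labeling the way a semi-automatic annotation loop might.
# All argument names are assumptions; outputs are expected as positive-class scores.
def _example_semiauto_labeling(val_outputs, val_labels, unlabeled_outputs,
                               unlabeled_outputs_index, unlabeled_index):
    upperThresh, lowerThresh = compute_thresholds(val_outputs, val_labels,
                                                  upper_ratio=0.99, lower_ratio=0.01)
    autoIndex = automatic_labeling(unlabeled_outputs, unlabeled_outputs_index,
                                   unlabeled_index, upperThresh, lowerThresh, rede=2)
    return autoIndex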
def get_auto_label_indexes(outputs, outputs_index, upper_thresh, lower_thresh, verbose=True):
datasetLen = len(outputs)
indexes = outputs_index
upperIndexes = indexes[np.greater(outputs, upper_thresh)]
lowerIndexes = indexes[np.less(outputs, lower_thresh)]
totalClassified = len(upperIndexes) + len(lowerIndexes)
if verbose:
print("\nIdeal Upper Threshold: ", upper_thresh)
print("Ideal Lower Threshold: ", lower_thresh)
print("\nImages in:")
print("upperIndexes: ", len(upperIndexes))
print("lowerIndexes: ", len(lowerIndexes))
print("\nImages automatically labeled: {}/{} = {:.2f} %".format(totalClassified, datasetLen,
(totalClassified)/datasetLen*100))
return upperIndexes, lowerIndexes
def get_classified_index(index, pos_hashes, neg_hashes, rede, target_class=None, index_col="FrameHash",
verbose=True):
'''
Create new auto labeled index from the unlabeled_images index and positive and negative indexes
lists.
'''
if index_col is not None:
index.set_index("FrameHash", drop=False, inplace=True)
if rede >= 1:
positiveLabel1 = commons.rede1_positive
negativeLabel1 = commons.rede1_negative
if rede >= 2:
positiveLabel2 = commons.rede2_positive
negativeLabel2 = commons.rede2_negative
if rede >= 3:
assert target_class in commons.rede3_classes.values(), "Unknown target_class value."
positiveLabel3 = target_class
newPositives = index.reindex(labels=pos_hashes, axis=0, copy=True)
newNegatives = index.reindex(labels=neg_hashes, axis=0, copy=True)
datasetLen = len(index)
lenPositives = len(newPositives)
lenNegatives = len(newNegatives)
# Set positive and negative class labels
if rede == 1:
newPositives["rede1"] = [positiveLabel1]*lenPositives
newNegatives["rede1"] = [negativeLabel1]*lenNegatives
if rede == 2:
newPositives["rede1"] = [positiveLabel1]*lenPositives
newNegatives["rede1"] = [positiveLabel1]*lenNegatives
newPositives["rede2"] = [positiveLabel2]*lenPositives
newNegatives["rede2"] = [negativeLabel2]*lenNegatives
if rede == 3:
newPositives["rede1"] = [positiveLabel1]*lenPositives
newNegatives["rede1"] = [positiveLabel1]*lenNegatives
newPositives["rede2"] = [positiveLabel2]*lenPositives
newNegatives["rede2"] = [positiveLabel2]*lenNegatives
newPositives["rede3"] = [positiveLabel3]*lenPositives
newLabeledIndex = pd.concat([newPositives, newNegatives], axis=0, sort=False)
    # Why this again?
if rede == 2:
newPositives["rede1"] = [positiveLabel1]*lenPositives
if verbose:
print(newLabeledIndex.shape)
print("Unlabeled images: ", datasetLen)
print("New pos labels: ", lenPositives)
print("New neg labels: ", lenNegatives)
print("Total new labels: ", lenPositives+lenNegatives)
print("New labels len: ", newLabeledIndex.shape)
print("\nAutomatic anotation of {:.2f} % of input images.".format(len(newLabeledIndex)/datasetLen*100))
return newLabeledIndex
## Threshold finding
def compute_thresholds(val_outputs, labels,
upper_ratio=0.95,
lower_ratio=0.01,
resolution=0.001,
val_indexes=None):
val_outputs = np.squeeze(utils.normalize_array(val_outputs))
val_outputs = val_outputs[:, 0]
resBits = len(str(resolution)) -2
# Maximum resolution is to test a threshold on all output values
if resolution == 'max':
upperThreshList = np.sort(val_outputs)
lowerThreshList = copy(upperThreshList)[::-1]
else:
lowerThreshList = np.arange(0., 1., resolution)
upperThreshList = np.arange(1., 0., -resolution)
# upperThreshList = np.arange(0., 1., resolution)
# lowerThreshList = np.arange(1., 0., -resolution)
# Find upper threshold
idealUpperThresh = find_ideal_upper_thresh(
val_outputs, labels, upperThreshList, ratio=upper_ratio)#, verbose=True)
# Find lower threshold
idealLowerThresh = find_ideal_lower_thresh(
val_outputs, labels, lowerThreshList, ratio=lower_ratio)
idealLowerThresh = np.around(idealLowerThresh, decimals=resBits)
idealUpperThresh = np.around(idealUpperThresh, decimals=resBits)
## If thresholds break, take the mean value
## TODO: Instead of choosing the mean, choose a thresh that maximizes AUC
# if idealUpperThresh < idealLowerThresh:
# meanThresh = (idealUpperThresh+idealLowerThresh)/2
# idealUpperThresh = meanThresh
# idealLowerThresh = meanThresh
if val_indexes is not None:
get_auto_label_indexes(val_outputs, val_indexes, idealUpperThresh, idealLowerThresh, verbose=True)
return idealUpperThresh, idealLowerThresh
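# Hedged usage sketch (illustrative only): runs the threshold search on a small
# synthetic validation set. Positive class index is 0, matching the ratio helpers below.
def _example_compute_thresholds():
    rng = np.random.RandomState(0)
    val_labels = rng.randint(0, 2, size=200)            # 0 = positive class
    val_scores = np.where(val_labels == 0,
                          rng.uniform(0.6, 1.0, 200),   # positives tend to score high
                          rng.uniform(0.0, 0.4, 200))   # negatives tend to score low
    val_outputs = np.column_stack([val_scores, 1. - val_scores])
    return compute_thresholds(val_outputs, val_labels,
                              upper_ratio=0.99, lower_ratio=0.01, resolution=0.001)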
def upper_positive_relative_ratio(outputs, labels, threshold):
'''
Compute ratio of ground truth positive examples above given threshold relative only
to the examples above the threshold.
'''
datasetLen = len(outputs)
mask = np.greater(outputs, threshold)
indexes = np.arange(datasetLen)[mask]
if len(indexes) > 0:
posPercent = np.sum(labels[indexes] == 0)/len(indexes) # Positive class index is 0
else:
return 1.
return posPercent
def lower_positive_ratio(outputs, labels, threshold):
'''
Compute ratio of ground truth positive examples below a given threshold relative
to the entire dataset.
'''
datasetLen = len(outputs)
mask = np.less(outputs, threshold)
indexes = np.arange(datasetLen)[mask]
if len(indexes) > 0:
posPercent = np.sum(labels[indexes] == 0)/datasetLen # Positive class index is 0
else:
return 0.
return posPercent
def find_ideal_lower_thresh(outputs, labels, threshold_list=None, ratio=0.01, resolution=0.001, verbose=False):
if verbose:
print("\nThreshold\tLower Pos Ratio")
if threshold_list is None:
threshold_list = np.arange(0., 1., resolution)
for i in tqdm(range(len(threshold_list))):
lowerThresh = threshold_list[i]
posRatio = lower_positive_ratio(outputs, labels, lowerThresh)
if verbose:
print("{:.2f}\t\t{:.2f}".format(lowerThresh, posRatio)) # Print search progress
if (posRatio > ratio) and (ratio > 0.):
if i-1 < 0:
print("\nThreshold could not be found.")
return None
idealThresh = threshold_list[i-1]
posRatio = lower_positive_ratio(outputs, labels, idealThresh)
print("\nFound ideal Lower threshold {:.3f} with {:.2f} % ground truth positives.".format(idealThresh, posRatio*100))
return idealThresh
def find_ideal_upper_thresh(outputs, labels, threshold_list=None, ratio=0.95, resolution=0.001, verbose=False):
if verbose:
print("\nThreshold\tUpper Pos Ratio")
if threshold_list is None:
threshold_list = np.arange(1., 0., -resolution)
for i in tqdm(range(len(threshold_list))):
upperThresh = threshold_list[i]
posRatio = upper_positive_relative_ratio(outputs, labels, upperThresh)
if verbose:
print("{:.2f}\t\t{:.2f}".format(upperThresh, posRatio)) # Print search progress
if (posRatio < ratio) and (ratio < 1.):
if i-1 < 0:
print("\nThreshold could not be found.")
return None
idealThresh = threshold_list[i-1]
posRatio = upper_positive_relative_ratio(outputs, labels, idealThresh)
print("\nFound ideal Upper threshold {:.3f} with {:.2f} % ground truth positives.".format(idealThresh, posRatio*100))
return idealThresh
## Dataset files manipulation
def df_to_csv(dataframe, save_path, verbose=True):
dirs.create_folder(Path(save_path).parent)
dataframe.to_csv(save_path, index=False)
if verbose:
print("Saved DataFrame to ", save_path)
def get_ref_dataset_val_video_list(folder_path, verbose=False):
'''
Get a list of video hashes from a dataset folder with a specific file tree.
folder_path/
xxx/hash1/
yyy/hash2/
...
Returns non-duplicated list of found hashes.
'''
globString = str(folder_path)+"/**"
folderList = glob(globString, recursive=True)
videoList = []
for pathEntry in folderList:
relString = Path(pathEntry).relative_to(folder_path)
if len(relString.parts) == 2:
videoHash = relString.parts[-1]
videoList.append(videoHash)
videoList = list(set(videoList))
return videoList
def split_validation_set_from_video_list(df_path, index_list, key_column="HashMD5", verbose=False):
'''
Split a DataFrame given by df_path in two, according to index_list. The DataFrame is split in
two other: one containing only entries with indexes in index_list; the other is the converse,
containing none of the given indexes.
Arguments:
df_path: str filepath
Filepath to target DataFrame saved in csv format.
index_list: list
List of indices to guide the split. One split set will contain only entries with indexes
in this list and the other set will contain the remaining entries.
key_column: str
Name of the DataFrame column where the indexes of index_list will be searched.
Returns:
trainIndex: DataFrame
DataFrame subset from input DataFrame. Contains only entries with indexes not present in
index_list.
valIndex: DataFrame
DataFrame subset from input DataFrame. Contains only entries with indexes present in
index_list.
'''
index = pd.read_csv(df_path)
index.dropna(axis=0, subset=[key_column], inplace=True)
valHash = index_list
trainHash = set(index[key_column]) - set(valHash)
# valHash = utils.compute_file_hash_list(index_list)
index.set_index(key_column, drop=False, inplace=True)
trainIndex = index.loc[trainHash, :].copy()
valIndex = index.loc[valHash, :].copy()
trainIndex.reset_index(inplace=True, drop=True)
valIndex.reset_index(inplace=True, drop=True)
return trainIndex, valIndex
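# Hedged usage sketch (illustrative only): builds the train/validation split from
# the video hashes found in a reference dataset folder. Paths are hypothetical.
def _example_split_validation(df_path="unlabeled_index.csv",
                              ref_folder="reference_dataset/val"):
    valVideoList = get_ref_dataset_val_video_list(ref_folder)
    trainIndex, valIndex = split_validation_set_from_video_list(
        df_path, valVideoList, key_column="HashMD5")
    return trainIndex, valIndex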
def merge_indexes(index_path_list, key_column):
'''
Read a list of DataFrame paths, concatenates them and remove duplicated elements from resulting DF.
'''
# assert (len(index_path_list) >= 2) and \
# not(isinstance(index_path_list, str)), \
# "Argument index_path_list must be a list of two or more DataFrame paths."
assert hasattr(index_path_list, "__iter__") and \
not(isinstance(index_path_list, str)), \
"Argument index_path_list must be a list of two or more DataFrame paths."
indexListNoDups = [remove_duplicates(pd.read_csv(x), key_column) for x in index_path_list]
if len(indexListNoDups) > 1:
newIndex = | pd.concat(indexListNoDups, axis=0, sort=False) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 11:29:34 2020
@author: Pavan
"""
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
import scipy.stats as stats
import itertools
from datetime import datetime, date
import os
import yfinance as yf
# from functools import partial
from american_option_pricing import american_option
import density_utilities as du
import prediction_ensemble_py as pe
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
"""
#######################################################################################
Import Data
#######################################################################################
"""
data = | pd.read_excel('spy.xlsx', index_col=None) | pandas.read_excel |
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
import os
process_raw = pd.read_csv(os.getcwd() + '/tf_scripts/suggester/process.csv')
features = ['RoomCount', 'EdgeCount', 'SubStepsCount', 'FPcount']
process_data = | pd.concat([process_raw], axis=1) | pandas.concat |
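# Hedged sketch (illustrative only): one plausible way the imported MinMaxScaler
# and KMeans could be applied to the feature columns above; the cluster count is
# an assumption, not taken from the original script.
def _example_cluster_processes(n_clusters=3):
    scaler = MinMaxScaler()
    scaled = scaler.fit_transform(process_data[features])
    kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(scaled)
    return kmeans.labels_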
import subprocess
from datetime import datetime
import pandas as pd
def sacct_jobs(account_query, d_from, d_to='', debugging=False,
write_txt='', sacct_file='', serialize_frame=''):
"""Ingest job record information from slurm via sacct and return DataFrame.
Parameters
-------
account_query: str
String query to be sent to sacct via -A flag.
d_from: date str
Beginning of the query period, e.g. '2019-04-01T00:00:00'
d_to: optional, date str
End of the query period,
e.g. '2020-01-01T00:00:00' Defaults to now if empty.
debugging: boolean, optional
Boolean for reporting progress to stdout. Default False.
write_txt: str, optional
Writes the results of the raw sacct query to given file.
If empty, no file is created. Defaults to the empty string.
sacct_file: str, optional
Loads a raw query from file.
If empty, query is rerun. Defaults to the empty string.
serialize_frame: str, optional
Pickle the resulting DataFrame.
If empty, pickling is skipped. Defaults to the empty string.
Returns
-------
DataFrame
Returns a standard pandas DataFrame, or None if no jobs found.
"""
# d_to boilerplate
if d_to == '':
now = datetime.now()
d_to = now.strftime('%Y-%m-%dT%H:%M:%S')
headers = ['jobid', 'user', 'account', 'submit', 'start', 'end',
'ncpus', 'nnodes', 'reqmem', 'timelimit', 'state',
'reqgres', 'reqtres', 'priority', 'partition']
data = ''
if sacct_file == '':
if account_query != '':
base_cmd = ['sacct', '-aX', '-A', account_query, '-S',
d_from, '-E', d_to, '-p', '--delimiter',
'"|"', '-n', '--units=M']
else:
base_cmd = ['sacct', '-aX', '-S', d_from, '-E', d_to,
'-p', '--delimiter', '"|"', '-n', '--units=M']
base_cmd.append('-o')
base_cmd.append(','.join(headers)+'%36')
data = subprocess.check_output(base_cmd).decode('UTF-8')
if write_txt != '':
with open(write_txt, 'w') as f_id:
f_id.write('%s' % data)
else:
with open(sacct_file, 'r') as f_id:
data = f_id.read()
if debugging:
print('Done sacct query')
job_frame = pd.DataFrame([x.split('"|"') for x in data.split('\n')])
job_frame = job_frame.iloc[:, :-1] # Due to split implementation...
job_frame = job_frame.iloc[:-1, :] # Due to split implementation...
# Edge case before things start to happen...
if job_frame.empty:
return None
job_frame.columns = headers
# Align sacct to elasticsearch implementation
job_frame['reqcpus'] = pd.to_numeric(job_frame['ncpus'])
job_frame['nnodes'] = pd.to_numeric(job_frame['nnodes'])
job_frame['submit'] = pd.to_datetime(job_frame['submit'])
job_frame['start'] = pd.to_datetime(job_frame['start'], errors='coerce')
job_frame['end'] = | pd.to_datetime(job_frame['end'], errors='coerce') | pandas.to_datetime |
# %% Notes
# ------------------------------------------------------------------->>>>>>>>>>
# Use this script for the final ID/name update; the replacements are compiled from the senior lab member's list
# os.chdir("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/Manuscript/20220311_My_tables")
# ------------------------------------------------------------------->>>>>>>>>>
# %% imports and settings
from pandarallel import pandarallel
import datar.all as r
from datar import f
import plotnine as p9
import os
import numpy as np
import pandas as pd
import seaborn as sns
import glob
sns.set()
pd.set_option("max_colwidth", 100) # column最大宽度
| pd.set_option("display.width", 250) | pandas.set_option |
import re
import os
import xml.etree.ElementTree as ET
import pandas as pd
import boto3
import csv
from urllib.parse import unquote_plus
s3_client = boto3.client('s3')
s3 = boto3.resource('s3')
from xml_2_data import mnfp_2_data
from xml_2_data import mnfp1_2_data
from xml_2_data import mnfp2_2_data
from nmfp_rename_vars import nmfp_rename_vars
def lambda_handler(event, context):
# parse the S3 triggered event
debug = False
if debug:
bucket = "fundmapper"
key = "02-RawNMFPs/S000007665/2011-01-06-S000007665.txt"
else:
record = event['Records'][0]
bucket = record['s3']['bucket']['name']
key = unquote_plus(record['s3']['object']['key'])
prefix, series_id, filing = key.split("/")
print(bucket)
print(series_id)
print(filing)
print(key)
# store temporarily
print("download")
s3_client.download_file(bucket, key, f"/tmp/{series_id}_{filing}.txt")
print("downloaded")
# (s3.Object(bucket, key)
# .delete())
print("deleted")
# read
filing = open("/tmp/" + series_id + "_" + filing + ".txt", 'r').read()
filing = filing.replace(":", "")
filing_type = re.search("<TYPE>(.*)\n", filing).group(1)
filing_date = int(re.sub("[^0-9]", "", re.search("CONFORMED PERIOD OF REPORT(.*)\n", filing).group(1))[0:6])
filing_year = int(re.sub("[^0-9]", "", re.search("CONFORMED PERIOD OF REPORT(.*)\n", filing).group(1))[0:4])
filing = (filing.replace("\n", "")
.replace(' xmlns="http//www.sec.gov/edgar/nmfpsecurities"', '')
.replace(' xmlns="http//www.sec.gov/edgar/nmfpfund"', ""))
print("convert")
if filing_type in ["N-MFP", "N-MFP/A"]:
series_df, class_df, holdings, all_collateral = mnfp_2_data(filing)
series_df, class_df, holdings, all_collateral = nmfp_rename_vars(filing_type, series_df, class_df, holdings,
all_collateral)
if filing_type in ["N-MFP1", "N-MFP1/A"]:
series_df, class_df, holdings, all_collateral = mnfp1_2_data(filing)
if filing_type in ["N-MFP2", "N-MFP2/A"]:
series_df, class_df, holdings, all_collateral = mnfp2_2_data(filing)
# drop , from all fields, GLUE doesn't get it seems...
series_df.replace({",": " "}, regex=True, inplace=True)
class_df.replace({",": " "}, regex=True, inplace=True)
holdings.replace({",": " "}, regex=True, inplace=True)
all_collateral.replace({",": " "}, regex=True, inplace=True)
# add date
series_df['date'], class_df['date'], holdings['date'], all_collateral[
'date'] = filing_date, filing_date, filing_date, filing_date
# add filing type
series_df['filing_type'], class_df['filing_type'], holdings['filing_type'], all_collateral[
'filing_type'] = filing_type, filing_type, filing_type, filing_type,
# add series id
series_df['series_id'], class_df['series_id'], holdings['series_id'], all_collateral[
'series_id'] = series_id, series_id, series_id, series_id
# holdings
holdings_str_columns = ['filing_type', 'repurchaseAgreement', 'securityDemandFeatureFlag',
'guarantorList', 'InvestmentIdentifier', 'NRSRO',
'isFundTreatingAsAcquisitionUnderlyingSecurities',
'finalLegalInvestmentMaturityDate', 'cik', 'weeklyLiquidAssetSecurityFlag', 'rating',
'investmentCategory', 'repurchaseAgreementList', 'dailyLiquidAssetSecurityFlag',
'securityCategorizedAtLevel3Flag', 'CUSIPMember', 'investmentMaturityDateWAM',
'ISINId', 'LEIID', 'titleOfIssuer', 'securityEnhancementsFlag', 'InvestmentTypeDomain',
'securityGuaranteeFlag', 'fundAcqstnUndrlyngSecurityFlag',
'securityEligibilityFlag', 'otherUniqueId', 'demandFeatureIssuerList', 'nameOfIssuer',
'illiquidSecurityFlag', 'series_id']
holdings_float_columns = ['yieldOfTheSecurityAsOfReportingDate', 'investmentMaturityDateWAL',
'AvailableForSaleSecuritiesAmortizedCost',
'includingValueOfAnySponsorSupport', 'excludingValueOfAnySponsorSupport',
'InvestmentOwnedBalancePrincipalAmount', 'percentageOfMoneyMarketFundNetAssets', ]
holdings_int_columns = ['date', 'issuer_number']
holdings_columns = holdings_str_columns + holdings_float_columns + holdings_int_columns
holdings_data = pd.DataFrame(columns=holdings_columns)
holdings_data = holdings_data.append(holdings)
del holdings
# collateral
collateral_str_columns = ['ctgryInvestmentsRprsntsCollateral', 'filing_type', 'LEIID',
'principalAmountToTheNearestCent',
'maturityDate', 'series_id', 'nameOfCollateralIssuer']
collateral_int_columns = ['issuer_number', 'date']
collateral_float_columns = ['couponOrYield', 'valueOfCollateralToTheNearestCent',
'AssetsSoldUnderAgreementsToRepurchaseCarryingAmounts',
'CashCollateralForBorrowedSecurities']
collateral_columns = collateral_str_columns + collateral_int_columns + collateral_float_columns
collateral_data = | pd.DataFrame(columns=collateral_columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexinst in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-D matrix with shape (2, 3) to input. empty - makes sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # an all-masked matrix yields only NaN values, so frame == frame is False everywhere
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
                return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = | DataFrame([[2, 1], [2, 1]], columns=['b', 'a']) | pandas.DataFrame |
"""
TRAIN CLASSIFIER
Disaster Response Project
Udacity - Data Science Nanodegree
How to run this script (Example)
> python train_classifier.py ../data/DisasterResponse.db classifier.pkl
Arguments:
1) SQLite db path (containing pre-processed data)
2) pickle file name to save trained ML model
"""
# import libraries
import pandas as pd
import numpy as np
import os
import pickle
import bz2
from sqlalchemy import create_engine
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, accuracy_score, f1_score, fbeta_score, classification_report
from scipy.stats import hmean
from scipy.stats.mstats import gmean
import time
import datetime
import sys
# import warnings filter
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])
# load data from database
def load_data(database_filepath):
"""
Load Data Function
Args:
database_filepath - path to SQLite db
Returns:
        X - Series containing the features (messages)
        Y - DataFrame containing the labels, i.e. categories
        category_names - list of category names used for data visualization (app)
        genre_names - list of genre names used for data visualization (app)
"""
engine = create_engine('sqlite:///'+database_filepath)
# 'messages_categories' is the name of the table in database DisasterResponse.db
df = | pd.read_sql_table('messages_categories',engine) | pandas.read_sql_table |
import numpy as np
import pandas as pd
from sapextractor.algo.o2c import o2c_common
from sapextractor.utils import constants
from sapextractor.utils.change_tables import extract_change
from sapextractor.utils.filters import case_filter
from sapextractor.utils.graph_building import build_graph
from sapextractor.algo.o2c import payment_part
def extract_changes_vbfa(con, dataframe, mandt="800"):
if len(dataframe) > 0:
case_vbeln = dataframe[["case:concept:name", "VBELN"]].to_dict("records")
else:
case_vbeln = []
case_vbeln_dict = {}
for x in case_vbeln:
caseid = x["case:concept:name"]
vbeln = x["VBELN"]
if vbeln not in case_vbeln_dict:
case_vbeln_dict[vbeln] = set()
case_vbeln_dict[vbeln].add(caseid)
ret = []
for tup in [("VERKBELEG", "VBAK"), ("VERKBELEG", "VBAP"), ("VERKBELEG", "VBUK"), ("LIEFERUNG", "LIKP"),
("LIEFERUNG", "LIPS"), ("LIEFERUNG", "VBUK")]:
changes = extract_change.apply(con, objectclas=tup[0], tabname=tup[1], mandt=mandt)
changes = {x: y for x, y in changes.items() if x in case_vbeln_dict}
for x, y in changes.items():
y = y[[xx for xx in y.columns if xx.startswith("event_")]]
cols = {x: x.split("event_")[-1] for x in y.columns}
cols["event_timestamp"] = "time:timestamp"
y = y.rename(columns=cols)
y["VBELN"] = y["AWKEY"]
y["concept:name"] = y["CHANGEDESC"]
for cc in case_vbeln_dict[x]:
z = y.copy()
z["case:concept:name"] = cc
ret.append(z)
if ret:
ret = pd.concat(ret)
else:
ret = pd.DataFrame()
return ret
def extract_bkpf_bsak(con, dataframe, gjahr="2020", mandt="800"):
if len(dataframe) > 0:
case_vbeln = dataframe[["case:concept:name", "VBELN"]].to_dict("records")
else:
case_vbeln = []
case_vbeln_dict = {}
for x in case_vbeln:
caseid = x["case:concept:name"]
vbeln = x["VBELN"]
if vbeln not in case_vbeln_dict:
case_vbeln_dict[vbeln] = set()
case_vbeln_dict[vbeln].add(caseid)
dict_awkey, clearance_docs_dates, blart_vals = payment_part.apply(con, gjahr=gjahr, mandt=mandt)
intersect = set(case_vbeln_dict.keys()).intersection(dict_awkey.keys())
ret = []
for k in intersect:
for belnr in dict_awkey[k]:
if belnr in clearance_docs_dates:
for clearingdoc in clearance_docs_dates[belnr]:
for cas in case_vbeln_dict[k]:
ret.append(
{"case:concept:name": cas, "concept:name": "Clearance (" + blart_vals[clearingdoc[2]] + ")",
"AUGBL": clearingdoc[0], "time:timestamp": clearingdoc[1]})
ret = pd.DataFrame(ret)
if len(ret) > 0:
if "time:timestamp" in ret.columns:
ret["time:timestamp"] = ret["time:timestamp"] + pd.Timedelta(np.timedelta64(86399, 's'))
ret = ret.groupby(["case:concept:name", "AUGBL"]).first().reset_index()
return ret
def apply(con, ref_type="Order", keep_first=True, min_extr_date="2020-01-01 00:00:00", gjahr="2020", enable_changes=True, enable_payments=True, allowed_act_doc_types=None, allowed_act_changes=None, mandt="800"):
dataframe = o2c_common.apply(con, keep_first=keep_first, min_extr_date=min_extr_date, mandt=mandt)
dataframe = dataframe[[x for x in dataframe.columns if x.startswith("event_")]]
cols = {x: x.split("event_")[-1] for x in dataframe.columns}
cols["event_activity"] = "concept:name"
cols["event_timestamp"] = "time:timestamp"
dataframe = dataframe.rename(columns=cols)
if len(dataframe) > 0:
all_docs = set(dataframe[dataframe["VBTYP_N"] == ref_type]["VBELN"].unique())
ancest_succ = build_graph.get_ancestors_successors(dataframe, "VBELV", "VBELN", "VBTYP_V", "VBTYP_N",
ref_type=ref_type, all_docs=all_docs)
# ancest_succ = build_graph.get_conn_comp(dataframe, "VBELV", "VBELN", "VBTYP_V", "VBTYP_N", ref_type=ref_type)
dataframe = dataframe.merge(ancest_succ, left_on="VBELN", right_on="node", suffixes=('', '_r'), how="right")
dataframe = dataframe.reset_index()
if keep_first:
dataframe = dataframe.groupby(["case:concept:name", "VBELN"]).first().reset_index()
if allowed_act_doc_types is not None:
allowed_act_doc_types = set(allowed_act_doc_types)
dataframe = dataframe[dataframe["concept:name"].isin(allowed_act_doc_types)]
if enable_changes:
changes = extract_changes_vbfa(con, dataframe, mandt=mandt)
else:
changes = pd.DataFrame()
if enable_payments:
payments = extract_bkpf_bsak(con, dataframe, gjahr=gjahr, mandt=mandt)
else:
payments = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from os import path
from src.utils.logger import Logger
from src.utils.path import PATH_DATA_OUTPUT, PATH_FEATURES, PATH_DATA_PROCESSED, PATH_REPORTS
import src.utils.input_output as io
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error
import xgboost as xgb
import pickle
import warnings
warnings.filterwarnings('ignore')
def prepare_model_data(feature, idx, lag_length):
"""
Restructure raw datasets.
:param feature: feature dataset
:param idx: index price dataset
    :param lag_length: number of lagged index prices to create as features
:return: processed data
"""
idx.columns = ['Date', 'idx_price']
idx.index = idx['Date']
idx.drop(['Date'], axis=1, inplace=True)
# create lagging features - 15 months
for i in range(1, lag_length):
idx['lag_' + str(i)] = idx['idx_price'].shift(i)
# merge feature and index datasets
dt = feature.merge(idx, how='left', on='Date')
    # strip thousands separators/spaces and cast these columns to int
cols = ['Fed Balance Sheet', 'US Real Personal Income', 'US Real Personal Income exTrans',
'Adv Retail Sales US exFood Services']
for i in cols:
dt[i] = dt[i].apply(lambda x: x.replace(",", ""))
dt[i] = dt[i].apply(lambda x: x.replace(" ", ""))
dt[i] = dt[i].apply(lambda x: int(float(x)))
    # shift the index price up by one period to form the prediction target
dt['pred_price'] = dt['idx_price'].shift(-1)
# truncate dataset
dt = dt.iloc[(lag_length - 1):, ]
# generate metrics for report
dt0 = idx.copy()
dt0['return'] = (dt0['idx_price']-dt0['idx_price'].shift(1))/dt0['idx_price'].shift(1)
print(dt0.shape)
max_month_gain = np.nanmax(dt0['return'])
max_month_loss = np.nanmin(dt0['return'])
cum_returns = (1 + dt0['return']).cumprod()
max_drawdown = np.ptp(cum_returns[1:]) / np.nanmax(cum_returns[1:])
annual_SR = (np.mean(dt0['return'])*np.sqrt(12))/np.std(dt0['return'])
def rolling_sharpe(y):
return np.sqrt(36) * (y.mean() / y.std())
rolling_SR_3y = dt0['return'].rolling(window=36).apply(rolling_sharpe)
    avg_rolling_3y_SR = np.mean(rolling_SR_3y)
# generate dataframe
eval_metrics = pd.DataFrame([])
eval_metrics['max monthly gain'] = | pd.Series([max_month_gain]) | pandas.Series |
# Source
# Portfolio optimization in finance is the technique of constructing a portfolio of assets that yields the maximum return for the minimum risk.
# https://pythoninvest.com/long-read/practical-portfolio-optimisation
# https://github.com/realmistic/PythonInvest-basic-fin-analysis
##############################################################################################################
# ░█████╗░░██████╗░██████╗███████╗████████╗
# ██╔══██╗██╔════╝██╔════╝██╔════╝╚══██╔══╝
# ███████║╚█████╗░╚█████╗░█████╗░░░░░██║░░░
# ██╔══██║░╚═══██╗░╚═══██╗██╔══╝░░░░░██║░░░
# ██║░░██║██████╔╝██████╔╝███████╗░░░██║░░░
# ╚═╝░░╚═╝╚═════╝░╚═════╝░╚══════╝░░░╚═╝░░░
# ███╗░░░███╗░█████╗░███╗░░██╗░█████╗░░██████╗░███████╗███╗░░░███╗███████╗███╗░░██╗████████╗
# ████╗░████║██╔══██╗████╗░██║██╔══██╗██╔════╝░██╔════╝████╗░████║██╔════╝████╗░██║╚══██╔══╝
# ██╔████╔██║███████║██╔██╗██║███████║██║░░██╗░█████╗░░██╔████╔██║█████╗░░██╔██╗██║░░░██║░░░
# ██║╚██╔╝██║██╔══██║██║╚████║██╔══██║██║░░╚██╗██╔══╝░░██║╚██╔╝██║██╔══╝░░██║╚████║░░░██║░░░
# ██║░╚═╝░██║██║░░██║██║░╚███║██║░░██║╚██████╔╝███████╗██║░╚═╝░██║███████╗██║░╚███║░░░██║░░░
# ╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░╚══╝╚═╝░░╚═╝░╚═════╝░╚══════╝╚═╝░░░░░╚═╝╚══════╝╚═╝░░╚══╝░░░╚═╝░░░
##############################################################################################################
##############################################################################################################
# Portfolio Optimization with Python using Efficient Frontier with Practical Examples
##############################################################################################################
# Portfolio optimization is the process of creating a portfolio of assets, for which your investment has the maximum return and minimum risk.
# Modern Portfolio Theory (MPT), or also known as mean-variance analysis is a mathematical process which allows the user to maximize returns for a given risk level.
# It was formulated by <NAME> and while it is not the only optimization technique known, it is the most widely used.
# Efficient frontier is a graph with ‘returns’ on the Y-axis and ‘volatility’ on the X-axis.
# It shows the set of optimal portfolios that offer the highest expected return for a given risk level or the lowest risk for a given level of expected return.
##############################################################################################################
# Practical Portfolio Optimisation
##############################################################################################################
# What? Identify an optimal split for a known set of stocks and a given investment size.
# Why? Smart portfolio management adds a lot to the risk management of your trades: it can reduce portfolio volatility, increase returns per unit of risk, and reduce worst-case losses
# How? Use the library PyPortfolioOpt
# User guide: https://pyportfolioopt.readthedocs.io/en/latest/UserGuide.html
# Detailed Colab example (Mean-Variance-Optimisation): https://github.com/robertmartin8/PyPortfolioOpt/blob/master/cookbook/2-Mean-Variance-Optimisation.ipynb
# Plan
# 1. Prep work : imports, getting financial data, and pivot table of daily prices
# 2. Correlation matrix
# 3. PyPortfolioOpt : min volatility, max Sharpe, and min cVAR portfolios
# 4. PyPortfolioOpt : Efficient Frontier
# 5. PyPortfolioOpt : Discrete Allocation
##############################################################################################################
# 0. Prep work : imports, getting financial data, and pivot table of daily prices
##############################################################################################################
# pip install yfinance
import pandas as pd
import yfinance as yf
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
INVESTMENT = 10000
# yFinance ticker list https://finance.yahoo.com/cryptocurrencies
# BTC-USD
# ETH-USD
# BNB-USD
# XRP-USD
# LTC-USD
# ZAM-USD
# ADA-USD
# TRX-USD
TICKERS =['BTC-USD','ETH-USD', 'BNB-USD', 'XRP-USD','LTC-USD', 'ZAM-USD', 'ADA-USD', 'TRX-USD']
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
##############################################################################################################
stocks_prices = pd.DataFrame({'A' : []})
stocks_info = pd.DataFrame({'A' : []})
for i,ticker in enumerate(TICKERS):
print(i,ticker)
yticker = yf.Ticker(ticker)
# Get max history of prices
historyPrices = yticker.history(period='max')
# generate features for historical prices, and what we want to predict
historyPrices['Ticker'] = ticker
historyPrices['Year']= historyPrices.index.year
historyPrices['Month'] = historyPrices.index.month
historyPrices['Weekday'] = historyPrices.index.weekday
historyPrices['Date'] = historyPrices.index.date
# historical returns
for i in [1,3,7,30,90,365]:
historyPrices['growth_'+str(i)+'d'] = historyPrices['Close'] / historyPrices['Close'].shift(i)
# future growth 3 days
historyPrices['future_growth_3d'] = historyPrices['Close'].shift(-3) / historyPrices['Close']
# 30d rolling volatility : https://ycharts.com/glossary/terms/rolling_vol_30
historyPrices['volatility'] = historyPrices['Close'].rolling(30).std() * np.sqrt(252)
if stocks_prices.empty:
stocks_prices = historyPrices
else:
stocks_prices = pd.concat([stocks_prices,historyPrices], ignore_index=True)
##############################################################################################################
# Check one day
filter_last_date = stocks_prices.Date==stocks_prices.Date.max()
print("This is a double check. The last date of the data collected is: ")
print(stocks_prices.Date.max())
stocks_prices[filter_last_date]
print(stocks_prices[filter_last_date])
##############################################################################################################
# https://medium.com/analytics-vidhya/how-to-create-a-stock-correlation-matrix-in-python-4f32f8cb5b50
df_pivot = stocks_prices.pivot('Date','Ticker','Close').reset_index()
df_pivot.tail(5)
print(df_pivot.tail(5))
##############################################################################################################
# 1. Correlation matrix
##############################################################################################################
# https://www.statisticshowto.com/probability-and-statistics/correlation-coefficient-formula/
df_pivot.corr()
print(df_pivot.corr())
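# Sanity check on .corr(): the Pearson coefficient of two price series is their covariance
# divided by the product of their standard deviations. Hand-rolled for one pair of tickers
# (the pair is chosen arbitrarily from TICKERS):
btc_eth = df_pivot[['BTC-USD', 'ETH-USD']].dropna()
manual_corr = btc_eth['BTC-USD'].cov(btc_eth['ETH-USD']) / (btc_eth['BTC-USD'].std() * btc_eth['ETH-USD'].std())
print(f'Manual Pearson correlation BTC/ETH: {manual_corr:.4f}')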
##############################################################################################################
# Print correlation matrix
# https://seaborn.pydata.org/generated/seaborn.heatmap.html
corr = df_pivot.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
f, ax = plt.subplots(figsize=(7, 5))
ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True, annot=True, cmap='RdYlGn')
# show the plot window
plt.show()
##############################################################################################################
# 2. PyPortfolioOpt : min volatility, max Sharpe, and min cVAR portfolios
##############################################################################################################
# User guide: https://pyportfolioopt.readthedocs.io/en/latest/UserGuide.html
# https://github.com/robertmartin8/PyPortfolioOpt
# pip install PyPortfolioOpt
import pypfopt
print(f'\n Library version: {pypfopt.__version__}')
##############################################################################################################
# https://github.com/robertmartin8/PyPortfolioOpt/blob/master/cookbook/2-Mean-Variance-Optimisation.ipynb
from pypfopt import risk_models
from pypfopt import plotting
from pypfopt import expected_returns
from pypfopt import EfficientFrontier
##############################################################################################################
# json: for pretty print of a dictionary: https://stackoverflow.com/questions/44689546/how-to-print-out-a-dictionary-nicely-in-python/44689627
import json
mu = expected_returns.capm_return(df_pivot.set_index('Date'))
# Other options for the returns values: expected_returns.ema_historical_return(df_pivot.set_index('Date'))
# Other options for the returns values: expected_returns.mean_historical_return(df_pivot.set_index('Date'))
print(f'Expected returns for each stock: {mu} \n')
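# The alternative return models mentioned above can be compared side by side to see how
# sensitive the optimisation inputs are to that choice (the frame name is arbitrary):
return_models = pd.DataFrame({
    'capm': mu,
    'ema_historical': expected_returns.ema_historical_return(df_pivot.set_index('Date')),
    'mean_historical': expected_returns.mean_historical_return(df_pivot.set_index('Date')),
})
print(f'Expected annual returns under different return models:\n{return_models}\n')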
S = risk_models.CovarianceShrinkage(df_pivot.set_index('Date')).ledoit_wolf()
# Weights between 0 and 1 - we don't allow shorting
ef = EfficientFrontier(mu, S, weight_bounds=(0, 1))
ef.min_volatility()
weights_min_volatility = ef.clean_weights()
print(f'Portfolio weights for min volatility optimisation (lowest level of risk): {json.dumps(weights_min_volatility, indent=4, sort_keys=True)} \n')
print(f'Portfolio performance: {ef.portfolio_performance(verbose=True, risk_free_rate=0.01305)} \n')
# Risk-free rate : 10Y TBonds rate on 21-Jul-2021 https://www.cnbc.com/quotes/US10Y
###########################
# IMPORTANT: RISK-FREE RATE
###########################
# Risk-free rate : the input should be checked and modified accordingly https://www.cnbc.com/quotes/US10Y
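# Cross-check of the Sharpe ratio reported above: it is (annualised return - risk-free rate)
# divided by annualised volatility. Variable names below are illustrative only.
ret_minvol, vol_minvol, sharpe_minvol = ef.portfolio_performance(risk_free_rate=0.01305)
print(f'Sharpe re-derived: {(ret_minvol - 0.01305) / vol_minvol:.4f} (reported: {sharpe_minvol:.4f})')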
##############################################################################################################
| pd.Series(weights_min_volatility) | pandas.Series |
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
import vectorbt as vbt
from vectorbt import settings
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
ret = ts.pct_change()
settings.returns['year_freq'] = '252 days' # same as empyrical
seed = 42
np.random.seed(seed)
benchmark_rets = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.wrapper.freq == day_dt
assert ret['a'].vbt.returns.wrapper.freq == day_dt
assert ret.vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([1, 2, 3]).vbt.returns.wrapper.freq is None
assert pd.Series([1, 2, 3]).vbt.returns(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([1, 2, 3]).vbt.returns(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
def test_ann_factor(self):
assert ret['a'].vbt.returns(year_freq='365 days').ann_factor == 365
assert ret.vbt.returns(year_freq='365 days').ann_factor == 365
with pytest.raises(Exception) as e_info:
assert pd.Series([1, 2, 3]).vbt.returns(freq=None).ann_factor
def test_from_price(self):
pd.testing.assert_series_equal(pd.Series.vbt.returns.from_price(ts['a']).obj, ts['a'].pct_change())
pd.testing.assert_frame_equal(pd.DataFrame.vbt.returns.from_price(ts).obj, ts.pct_change())
assert pd.Series.vbt.returns.from_price(ts['a'], year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert pd.DataFrame.vbt.returns.from_price(ts, year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_daily(self):
ret_12h = pd.DataFrame({
'a': [0.1, 0.1, 0.1, 0.1, 0.1],
'b': [-0.1, -0.1, -0.1, -0.1, -0.1],
'c': [0.1, -0.1, 0.1, -0.1, 0.1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1, 0),
datetime(2018, 1, 1, 12),
datetime(2018, 1, 2, 0),
datetime(2018, 1, 2, 12),
datetime(2018, 1, 3, 0)
]))
pd.testing.assert_series_equal(
ret_12h['a'].vbt.returns.daily(),
pd.Series(
np.array([0.21, 0.21, 0.1]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
name=ret_12h['a'].name
)
)
pd.testing.assert_frame_equal(
ret_12h.vbt.returns.daily(),
pd.DataFrame(
np.array([
[0.21, -0.19, -0.01],
[0.21, -0.19, -0.01],
[0.1, -0.1, 0.1]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
columns=ret_12h.columns
)
)
def test_annual(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.annual(),
pd.Series(
np.array([4.]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.annual(),
pd.DataFrame(
np.array([[4., -0.8, 0.]]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
columns=ret.columns
)
)
def test_cumulative(self):
res_a = empyrical.cum_returns(ret['a']).rename('a')
res_b = empyrical.cum_returns(ret['b']).rename('b')
res_c = empyrical.cum_returns(ret['c']).rename('c')
pd.testing.assert_series_equal(
ret['a'].vbt.returns.cumulative(),
res_a
)
pd.testing.assert_frame_equal(
ret.vbt.returns.cumulative(),
pd.concat([res_a, res_b, res_c], axis=1)
)
def test_total_return(self):
res_a = empyrical.cum_returns_final(ret['a'])
res_b = empyrical.cum_returns_final(ret['b'])
res_c = empyrical.cum_returns_final(ret['c'])
assert isclose(ret['a'].vbt.returns.total(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.total(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('total_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_total(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_annualized_return(self):
res_a = empyrical.annual_return(ret['a'])
res_b = empyrical.annual_return(ret['b'])
res_c = empyrical.annual_return(ret['c'])
assert isclose(ret['a'].vbt.returns.annualized(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_return')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_alpha",
[1., 2., 3.],
)
def test_annualized_volatility(self, test_alpha):
res_a = empyrical.annual_volatility(ret['a'], alpha=test_alpha)
res_b = empyrical.annual_volatility(ret['b'], alpha=test_alpha)
res_c = empyrical.annual_volatility(ret['c'], alpha=test_alpha)
assert isclose(ret['a'].vbt.returns.annualized_volatility(levy_alpha=test_alpha), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized_volatility(levy_alpha=test_alpha),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('annualized_volatility')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_annualized_volatility(ret.shape[0], minp=1, levy_alpha=test_alpha).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_calmar_ratio(self):
res_a = empyrical.calmar_ratio(ret['a'])
res_b = empyrical.calmar_ratio(ret['b'])
res_c = empyrical.calmar_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.calmar_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.calmar_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('calmar_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_calmar_ratio(ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free,test_required_return",
[(0.01, 0.1), (0.02, 0.2), (0.03, 0.3)],
)
def test_omega_ratio(self, test_risk_free, test_required_return):
res_a = empyrical.omega_ratio(ret['a'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_a):
res_a = np.inf
res_b = empyrical.omega_ratio(ret['b'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_b):
res_b = np.inf
res_c = empyrical.omega_ratio(ret['c'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_c):
res_c = np.inf
assert isclose(ret['a'].vbt.returns.omega_ratio(
risk_free=test_risk_free, required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.omega_ratio(risk_free=test_risk_free, required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('omega_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_omega_ratio(
ret.shape[0], minp=1, risk_free=test_risk_free, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_sharpe_ratio(self, test_risk_free):
res_a = empyrical.sharpe_ratio(ret['a'], risk_free=test_risk_free)
res_b = empyrical.sharpe_ratio(ret['b'], risk_free=test_risk_free)
res_c = empyrical.sharpe_ratio(ret['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.sharpe_ratio(risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sharpe_ratio(risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_sharpe_ratio(ret.shape[0], minp=1, risk_free=test_risk_free).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_deflated_sharpe_ratio(self):
pd.testing.assert_series_equal(
ret.vbt.returns.deflated_sharpe_ratio(risk_free=0.01),
pd.Series([np.nan, np.nan, 0.0005355605507117676], index=ret.columns).rename('deflated_sharpe_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.deflated_sharpe_ratio(risk_free=0.03),
pd.Series([np.nan, np.nan, 0.0003423112350834066], index=ret.columns).rename('deflated_sharpe_ratio')
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_downside_risk(self, test_required_return):
res_a = empyrical.downside_risk(ret['a'], required_return=test_required_return)
res_b = empyrical.downside_risk(ret['b'], required_return=test_required_return)
res_c = empyrical.downside_risk(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.downside_risk(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.downside_risk(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('downside_risk')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_downside_risk(
ret.shape[0], minp=1, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_required_return",
[0.01, 0.02, 0.03],
)
def test_sortino_ratio(self, test_required_return):
res_a = empyrical.sortino_ratio(ret['a'], required_return=test_required_return)
res_b = empyrical.sortino_ratio(ret['b'], required_return=test_required_return)
res_c = empyrical.sortino_ratio(ret['c'], required_return=test_required_return)
assert isclose(ret['a'].vbt.returns.sortino_ratio(required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.sortino_ratio(required_return=test_required_return),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('sortino_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_sortino_ratio(
ret.shape[0], minp=1, required_return=test_required_return).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_information_ratio(self):
res_a = empyrical.excess_sharpe(ret['a'], benchmark_rets['a'])
res_b = empyrical.excess_sharpe(ret['b'], benchmark_rets['b'])
res_c = empyrical.excess_sharpe(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.information_ratio(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.information_ratio(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('information_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_information_ratio(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_beta(self):
res_a = empyrical.beta(ret['a'], benchmark_rets['a'])
res_b = empyrical.beta(ret['b'], benchmark_rets['b'])
res_c = empyrical.beta(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.beta(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.beta(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('beta')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_beta(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_risk_free",
[0.01, 0.02, 0.03],
)
def test_alpha(self, test_risk_free):
res_a = empyrical.alpha(ret['a'], benchmark_rets['a'], risk_free=test_risk_free)
res_b = empyrical.alpha(ret['b'], benchmark_rets['b'], risk_free=test_risk_free)
res_c = empyrical.alpha(ret['c'], benchmark_rets['c'], risk_free=test_risk_free)
assert isclose(ret['a'].vbt.returns.alpha(benchmark_rets['a'], risk_free=test_risk_free), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.alpha(benchmark_rets, risk_free=test_risk_free),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('alpha')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_alpha(
benchmark_rets, ret.shape[0], minp=1, risk_free=test_risk_free).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_tail_ratio(self):
res_a = empyrical.tail_ratio(ret['a'])
res_b = empyrical.tail_ratio(ret['b'])
res_c = empyrical.tail_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.tail_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.tail_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('tail_ratio')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_tail_ratio(
ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_cutoff",
[0.05, 0.06, 0.07],
)
def test_value_at_risk(self, test_cutoff):
# empyrical can't tolerate NaN here
res_a = empyrical.value_at_risk(ret['a'].iloc[1:], cutoff=test_cutoff)
res_b = empyrical.value_at_risk(ret['b'].iloc[1:], cutoff=test_cutoff)
res_c = empyrical.value_at_risk(ret['c'].iloc[1:], cutoff=test_cutoff)
assert isclose(ret['a'].vbt.returns.value_at_risk(cutoff=test_cutoff), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.value_at_risk(cutoff=test_cutoff),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('value_at_risk')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_value_at_risk(
ret.shape[0], minp=1, cutoff=test_cutoff).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
@pytest.mark.parametrize(
"test_cutoff",
[0.05, 0.06, 0.07],
)
def test_cond_value_at_risk(self, test_cutoff):
# empyrical can't tolerate NaN here
res_a = empyrical.conditional_value_at_risk(ret['a'].iloc[1:], cutoff=test_cutoff)
res_b = empyrical.conditional_value_at_risk(ret['b'].iloc[1:], cutoff=test_cutoff)
res_c = empyrical.conditional_value_at_risk(ret['c'].iloc[1:], cutoff=test_cutoff)
assert isclose(ret['a'].vbt.returns.cond_value_at_risk(cutoff=test_cutoff), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.cond_value_at_risk(cutoff=test_cutoff),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('cond_value_at_risk')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_cond_value_at_risk(
ret.shape[0], minp=1, cutoff=test_cutoff).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_capture(self):
res_a = empyrical.capture(ret['a'], benchmark_rets['a'])
res_b = empyrical.capture(ret['b'], benchmark_rets['b'])
res_c = empyrical.capture(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.capture(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.capture(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('capture')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_capture(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_up_capture(self):
res_a = empyrical.up_capture(ret['a'], benchmark_rets['a'])
res_b = empyrical.up_capture(ret['b'], benchmark_rets['b'])
res_c = empyrical.up_capture(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.up_capture(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.up_capture(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('up_capture')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_up_capture(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_down_capture(self):
res_a = empyrical.down_capture(ret['a'], benchmark_rets['a'])
res_b = empyrical.down_capture(ret['b'], benchmark_rets['b'])
res_c = empyrical.down_capture(ret['c'], benchmark_rets['c'])
assert isclose(ret['a'].vbt.returns.down_capture(benchmark_rets['a']), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.down_capture(benchmark_rets),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('down_capture')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_down_capture(
benchmark_rets, ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_drawdown(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.drawdown(),
pd.Series(
np.array([0., 0., 0., 0., 0.]),
index=ret['a'].index,
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.drawdown(),
pd.DataFrame(
np.array([
[0., 0., 0.],
[0., -0.2, 0.],
[0., -0.4, 0.],
[0., -0.6, -0.33333333],
[0., -0.8, -0.66666667]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03',
'2018-01-04',
'2018-01-05'
], dtype='datetime64[ns]', freq=None),
columns=ret.columns
)
)
def test_max_drawdown(self):
res_a = empyrical.max_drawdown(ret['a'])
res_b = empyrical.max_drawdown(ret['b'])
res_c = empyrical.max_drawdown(ret['c'])
assert isclose(ret['a'].vbt.returns.max_drawdown(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.max_drawdown(),
pd.Series([res_a, res_b, res_c], index=ret.columns).rename('max_drawdown')
)
pd.testing.assert_series_equal(
ret.vbt.returns.rolling_max_drawdown(
ret.shape[0], minp=1).iloc[-1],
pd.Series([res_a, res_b, res_c], index=ret.columns).rename(ret.index[-1])
)
def test_drawdowns(self):
assert type(ret['a'].vbt.returns.drawdowns) is vbt.Drawdowns
assert ret['a'].vbt.returns.drawdowns.wrapper.freq == ret['a'].vbt.wrapper.freq
assert ret['a'].vbt.returns.drawdowns.wrapper.ndim == ret['a'].ndim
assert ret.vbt.returns.drawdowns.wrapper.ndim == ret.ndim
assert isclose(ret['a'].vbt.returns.drawdowns.max_drawdown(), ret['a'].vbt.returns.max_drawdown())
pd.testing.assert_series_equal(
ret.vbt.returns.drawdowns.max_drawdown(),
ret.vbt.returns.max_drawdown()
)
def test_stats(self):
pd.testing.assert_series_equal(
ret['b'].vbt.returns.stats(
benchmark_rets['b'],
levy_alpha=2.,
risk_free=0.01,
required_return=0.1
),
pd.Series([
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2018-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
-80.0,
-100.72986288899584,
-100.0,
208.74625745148103,
-39.93844058228336,
-1.25,
-80.0,
0.0,
-15.323368643952458,
-1.0653693625282994,
0.6452153997516223,
0.43684210526315786,
0.0,
-0.47500000000000003,
-0.9999978857530595,
0.4123019930790345
], index=[
'Start',
'End',
'Duration',
'Total Return [%]',
'Benchmark Return [%]',
'Annual Return [%]',
'Annual Volatility [%]',
'Sharpe Ratio',
'Calmar Ratio',
'Max Drawdown [%]',
'Omega Ratio',
'Sortino Ratio',
'Skew',
'Kurtosis',
'Tail Ratio',
'Common Sense Ratio',
'Value at Risk',
'Alpha',
'Beta'
], name='b')
)
pd.testing.assert_frame_equal(
ret.vbt.returns.stats(
benchmark_rets,
levy_alpha=2.,
risk_free=0.01,
required_return=0.1
),
pd.DataFrame([[
pd.Timestamp('2018-01-01 00:00:00'),
pd.Timestamp('2018-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
400.0,
451.8597134178033,
1.690784346944584e+37,
533.2682251925386,
24.139821935485003,
np.nan,
0.0,
np.inf,
np.inf,
1.4693345482106241,
2.030769230769236,
3.5238095238095237,
5.958001984471391e+35,
0.26249999999999996,
21533588.23721922,
0.7853755858374825
], [
| pd.Timestamp('2018-01-01 00:00:00') | pandas.Timestamp |
#!/usr/bin/env python
import collections
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
__author__ = "peiyong"
class Sample:
def __init__(self, feature=None, label=None):
self.feature = feature
self.label = label
def read_sparse(datafile):
labels = []
cols = []
rows = []
values = []
with open(datafile, 'r') as f:
for i,line in enumerate(f):
line = line.rstrip().split(' ')
label = float(line[0])
label = -1 if label != 1 else 1
col = [int(v.split(":")[0]) for v in line[1:]]
row = [i]*len(col)
value = [float(v.split(":")[1]) for v in line[1:]]
labels.append(label)
rows.extend(row)
cols.extend(col)
values.extend(value)
shape = [max(rows)+1, max(cols)+1]
features = csr_matrix( (values,(rows,cols)), shape=shape )
labels = np.array(labels)
return features, labels
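# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# read_sparse() expects libsvm-style lines: "<label> <col>:<value> <col>:<value> ...".
# The file name below is hypothetical.
def _example_read_sparse():
    with open('sparse_demo.txt', 'w') as f:
        f.write('1 0:0.5 3:1.2\n')
        f.write('0 1:0.7 2:0.4\n')  # any label other than 1 is mapped to -1
    features, labels = read_sparse('sparse_demo.txt')
    # features is a scipy CSR matrix of shape (2, 4); labels == array([ 1, -1])
    return features, labels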
def read_dense(datafile):
"""
each row: [y \t x1, x2, x3 ...]
"""
labels = []
features = []
with open(datafile, 'r') as f:
for line in f:
y,x = line.rstrip().split('\t')
labels.append(float(y))
features.append(list(map(float, x.split(','))))
features = np.array(features)
labels = np.array(labels)
return features, labels
def read_csv(datafile, label_column_name):
data = | pd.pandas.read_csv(datafile) | pandas.pandas.read_csv |
"""Module providing functions to plot data collected during sleep studies."""
import datetime
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mticks
import pandas as pd
import seaborn as sns
from fau_colors import colors_all
from biopsykit.utils.datatype_helper import AccDataFrame, GyrDataFrame, ImuDataFrame, SleepEndpointDict
_sleep_imu_plot_params = {
"background_color": ["#e0e0e0", "#9e9e9e"],
"background_alpha": [0.3, 0.3],
}
_bbox_default = dict(
fc=(1, 1, 1, plt.rcParams["legend.framealpha"]),
ec=plt.rcParams["legend.edgecolor"],
boxstyle="round",
)
def sleep_imu_plot(
data: Union[AccDataFrame, GyrDataFrame, ImuDataFrame],
datastreams: Optional[Union[str, Sequence[str]]] = None,
sleep_endpoints: Optional[SleepEndpointDict] = None,
downsample_factor: Optional[int] = None,
**kwargs,
) -> Tuple[plt.Figure, Iterable[plt.Axes]]:
"""Draw plot to visualize IMU data during sleep, and, optionally, add sleep endpoints information.
Parameters
----------
data : :class:`~pandas.DataFrame`
data to plot. Data must either be acceleration data (:obj:`~biopsykit.utils.datatype_helper.AccDataFrame`),
gyroscope data (:obj:`~biopsykit.utils.datatype_helper.GyrDataFrame`), or IMU data
(:obj:`~biopsykit.utils.datatype_helper.ImuDataFrame`).
datastreams : str or list of str, optional
list of datastreams indicating which type of data should be plotted or ``None`` to only plot acceleration data.
If more than one type of datastream is specified each datastream is plotted row-wise in its own subplot.
Default: ``None``
sleep_endpoints : :obj:`~biopsykit.utils.datatype_helper.SleepEndpointDict`
dictionary with sleep endpoints to add to plot or ``None`` to only plot IMU data.
downsample_factor : int, optional
downsample factor to apply to raw input data before plotting or ``None`` to not downsample data before
plotting (downsample factor 1). Default: ``None``
**kwargs
optional arguments for plot configuration.
To configure which type of sleep endpoint annotations to plot:
* ``plot_sleep_onset``: whether to plot sleep onset annotations or not: Default: ``True``
* ``plot_wake_onset``: whether to plot wake onset annotations or not: Default: ``True``
* ``plot_bed_start``: whether to plot bed interval start annotations or not: Default: ``True``
* ``plot_bed_end``: whether to plot bed interval end annotations or not: Default: ``True``
* ``plot_sleep_wake``: whether to plot vspans of detected sleep/wake phases or not: Default: ``True``
To style general plot appearance:
* ``axs``: pre-existing axes for the plot. Otherwise, a new figure and axes objects are created and
returned.
* ``figsize``: tuple specifying figure dimensions
* ``palette``: color palette to plot different axes from input data
To style axes:
* ``xlabel``: label of x axis. Default: "Time"
* ``ylabel``: label of y axis. Default: "Acceleration :math:`[m/s^2]`" for acceleration data and
"Angular Velocity :math:`[°/s]`" for gyroscope data
To style legend:
* ``legend_loc``: location of legend. Default: "lower left"
* ``legend_fontsize``: font size of legend labels. Default: "smaller"
Returns
-------
fig : :class:`~matplotlib.figure.Figure`
figure object
axs : list of :class:`~matplotlib.axes.Axes`
list of subplot axes objects
"""
axs: List[plt.Axes] = kwargs.pop("ax", kwargs.pop("axs", None))
sns.set_palette(kwargs.get("palette", sns.light_palette(getattr(colors_all, "fau"), n_colors=4, reverse=True)[:-1]))
if datastreams is None:
datastreams = ["acc"]
if isinstance(datastreams, str):
# ensure list
datastreams = [datastreams]
fig, axs = _sleep_imu_plot_get_fig_axs(axs, len(datastreams), **kwargs)
downsample_factor = _sleep_imu_plot_get_downsample_factor(downsample_factor)
if len(datastreams) != len(axs):
raise ValueError(
"Number of datastreams to be plotted must match number of provided subplots! Expected {}, got {}.".format(
len(datastreams), len(axs)
)
)
for ax, ds in zip(axs, datastreams):
_sleep_imu_plot(
data=data,
datastream=ds,
downsample_factor=downsample_factor,
sleep_endpoints=sleep_endpoints,
ax=ax,
**kwargs,
)
fig.tight_layout()
fig.autofmt_xdate(rotation=0, ha="center")
return fig, axs
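# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Builds a small synthetic, timezone-aware acceleration DataFrame and plots it.
# The column names ("acc_x", "acc_y", "acc_z") and the timezone are assumptions,
# chosen so that data.filter(like="acc") picks the columns up.
def _example_sleep_imu_plot():
    import numpy as np
    index = pd.date_range("2021-01-01 22:00", periods=1000, freq="s", tz="Europe/Berlin")
    rng = np.random.default_rng(0)
    data = pd.DataFrame(
        rng.normal(0, 1, size=(len(index), 3)),
        index=index,
        columns=["acc_x", "acc_y", "acc_z"],
    )
    fig, axs = sleep_imu_plot(data, datastreams="acc", downsample_factor=10)
    return fig, axs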
def _sleep_imu_plot_get_fig_axs(axs: List[plt.Axes], nrows: int, **kwargs):
figsize = kwargs.get("figsize", None)
if isinstance(axs, plt.Axes):
# ensure list (if only one Axes object is passed to sleep_imu_plot() instead of a list of Axes objects)
axs = [axs]
if axs is None:
fig, axs = plt.subplots(figsize=figsize, nrows=nrows)
else:
fig = axs[0].get_figure()
if isinstance(axs, plt.Axes):
# ensure list (if nrows == 1 only one axes object will be created, not a list of axes)
axs = [axs]
return fig, axs
def _sleep_imu_plot_get_downsample_factor(downsample_factor: int):
if downsample_factor is None:
downsample_factor = 1
# ensure int
downsample_factor = int(downsample_factor)
if downsample_factor < 1:
raise ValueError("'downsample_factor' must be >= 1!")
return downsample_factor
def _sleep_imu_plot(
data: pd.DataFrame,
datastream: str,
downsample_factor: int,
sleep_endpoints: SleepEndpointDict,
ax: plt.Axes,
**kwargs,
):
legend_loc = kwargs.get("legend_loc", "lower left")
legend_fontsize = kwargs.get("legend_fontsize", "smaller")
ylabel = kwargs.get("ylabel", {"acc": "Acceleration [$m/s^2$]", "gyr": "Angular Velocity [$°/s$]"})
xlabel = kwargs.get("xlabel", "Time")
if isinstance(data.index, pd.DatetimeIndex):
plt.rcParams["timezone"] = data.index.tz.zone
data_plot = data.filter(like=datastream)[::downsample_factor]
data_plot.plot(ax=ax)
if sleep_endpoints is not None:
kwargs.setdefault("ax", ax)
_sleep_imu_plot_add_sleep_endpoints(sleep_endpoints=sleep_endpoints, **kwargs)
if isinstance(data_plot.index, pd.DatetimeIndex):
# TODO add axis style for non-Datetime axes
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
ax.xaxis.set_minor_locator(mticks.AutoMinorLocator(6))
ax.set_ylabel(ylabel[datastream])
ax.set_xlabel(xlabel)
ax.legend(loc=legend_loc, fontsize=legend_fontsize, framealpha=1.0)
def _sleep_imu_plot_add_sleep_endpoints(sleep_endpoints: SleepEndpointDict, **kwargs):
bed_start = pd.to_datetime(sleep_endpoints["bed_interval_start"])
bed_end = pd.to_datetime(sleep_endpoints["bed_interval_end"])
sleep_onset = pd.to_datetime(sleep_endpoints["sleep_onset"])
wake_onset = pd.to_datetime(sleep_endpoints["wake_onset"])
ax = kwargs.pop("ax")
if isinstance(sleep_endpoints, dict):
sleep_bouts = sleep_endpoints["sleep_bouts"]
wake_bouts = sleep_endpoints["wake_bouts"]
date = sleep_endpoints["date"]
else:
sleep_bouts = pd.DataFrame(sleep_endpoints["sleep_bouts"][0])
wake_bouts = pd.DataFrame(sleep_endpoints["wake_bouts"][0])
date = sleep_endpoints.index[0][1]
date = | pd.to_datetime(date) | pandas.to_datetime |
import dask.dataframe as dd
import deimos
from functools import partial
import multiprocessing as mp
import numpy as np
import pandas as pd
def threshold(features, by='intensity', threshold=0):
'''
    Thresholds the input :obj:`~pandas.DataFrame`, keeping rows where the `by`
    column is strictly greater than the value passed to `threshold`.
Parameters
----------
features : :obj:`~pandas.DataFrame`
Input feature coordinates and intensities.
by : str
Variable to threshold by.
threshold : float
Threshold value.
Returns
-------
:obj:`~pandas.DataFrame`
Thresholded feature coordinates.
'''
return features.loc[features[by] > threshold, :].reset_index(drop=True)
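# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Small synthetic feature table; only rows whose intensity is strictly greater
# than the threshold are kept (here, the last two rows).
def _example_threshold():
    demo = pd.DataFrame({'mz': [100.1, 200.2, 300.3],
                         'drift_time': [1.0, 2.0, 3.0],
                         'retention_time': [5.0, 6.0, 7.0],
                         'intensity': [50, 500, 5000]})
    return threshold(demo, by='intensity', threshold=100)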
def collapse(features, keep=['mz', 'drift_time', 'retention_time'], how=np.sum):
'''
    Collapses input data such that only the specified dimensions remain, according
    to the supplied aggregation function.
Parameters
----------
features : :obj:`~pandas.DataFrame`
Input feature coordinates and intensities.
keep : str or list
Dimensions to keep during collapse operation.
how : function or str
Aggregation function for collapse operation.
Returns
-------
:obj:`~pandas.DataFrame`
Collapsed feature coordinates and aggregated
intensities.
'''
return features.groupby(by=keep,
as_index=False,
sort=False).agg({'intensity': how})
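# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Collapsing away drift_time sums intensity over that dimension, leaving only
# the dimensions listed in `keep`.
def _example_collapse():
    demo = pd.DataFrame({'mz': [100.1, 100.1, 200.2],
                         'drift_time': [1.0, 2.0, 1.0],
                         'retention_time': [5.0, 5.0, 6.0],
                         'intensity': [10, 20, 30]})
    # -> two rows: (100.1, 5.0, intensity=30) and (200.2, 6.0, intensity=30)
    return collapse(demo, keep=['mz', 'retention_time'], how=np.sum)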
def locate(features, by=['mz', 'drift_time', 'retention_time'],
loc=[0, 0, 0], tol=[0, 0, 0], return_index=False):
'''
Given a coordinate and tolerances, return a subset of the
data.
Parameters
----------
features : :obj:`~pandas.DataFrame`
Input feature coordinates and intensities.
by : str or list
Dimension(s) by which to subset the data.
loc : float or list
Coordinate location.
tol : float or list
Tolerance in each dimension.
return_index : bool
Return boolean index of subset if True.
Returns
-------
:obj:`~pandas.DataFrame`
Subset of feature coordinates and intensities.
:obj:`~numpy.array`
If `return_index` is True, boolean index of subset elements,
i.e. `features[index] = subset`.
Raises
------
ValueError
If `by`, `loc`, and `tol` are not the same length.
'''
# safely cast to list
by = deimos.utils.safelist(by)
loc = deimos.utils.safelist(loc)
tol = deimos.utils.safelist(tol)
# check dims
deimos.utils.check_length([by, loc, tol])
if features is None:
if return_index is True:
return None, None
else:
return None
# store index
rindex = features.index.values
# extend columns
cols = features.columns
cidx = [cols.get_loc(x) for x in by]
# subset by each dim
features = features.values
idx = np.full(features.shape[0], True, dtype=bool)
for i, x, dx in zip(cidx, loc, tol):
idx *= (features[:, i] <= x + dx) & (features[:, i] >= x - dx)
features = features[idx]
rindex = rindex[idx]
if return_index is True:
# data found
if features.shape[0] > 0:
return pd.DataFrame(features, index=rindex, columns=cols), idx
# no data
return None, idx
else:
# data found
if features.shape[0] > 0:
return pd.DataFrame(features, index=rindex, columns=cols)
# no data
return None
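# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Selects features within +/- tol of the given coordinate in every dimension;
# the third row falls outside the m/z window and is dropped.
def _example_locate():
    demo = pd.DataFrame({'mz': [100.00, 100.01, 150.00],
                         'drift_time': [10.0, 10.1, 20.0],
                         'retention_time': [5.0, 5.1, 9.0],
                         'intensity': [1, 2, 3]})
    return locate(demo, by=['mz', 'drift_time', 'retention_time'],
                  loc=[100.0, 10.0, 5.0], tol=[0.05, 0.5, 0.5])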
def locate_asym(features, by=['mz', 'drift_time', 'retention_time'],
loc=[0, 0, 0], low=[0, 0, 0], high=[0, 0, 0],
relative=[False, False, False], return_index=False):
'''
Given a coordinate and asymmetrical tolerances, return a subset of the
data.
Parameters
----------
features : :obj:`~pandas.DataFrame`
Input feature coordinates and intensities.
by : str or list
Dimension(s) by which to subset the data.
loc : float or list
Coordinate location.
low : float or list
Lower tolerance(s) in each dimension.
high : float or list
Upper tolerance(s) in each dimension.
relative : bool or list
Whether to use relative or absolute tolerance per dimension.
return_index : bool
Return boolean index of subset if True.
Returns
-------
:obj:`~pandas.DataFrame`
Subset of feature coordinates and intensities.
:obj:`~numpy.array`
If `return_index` is True, boolean index of subset elements,
i.e. `features[index] = subset`.
Raises
------
ValueError
        If `by`, `loc`, `low`, `high`, and `relative` are not the same length.
'''
# safely cast to list
by = deimos.utils.safelist(by)
loc = deimos.utils.safelist(loc)
low = deimos.utils.safelist(low)
high = deimos.utils.safelist(high)
relative = deimos.utils.safelist(relative)
# check dims
deimos.utils.check_length([by, loc, low, high, relative])
lb = []
ub = []
for x, lower, upper, rel in zip(loc, low, high, relative):
if rel is True:
lb.append(x * (1 + lower))
ub.append(x * (1 + upper))
else:
lb.append(x + lower)
ub.append(x + upper)
return deimos.subset.slice(features, by=by, low=lb, high=ub, return_index=return_index)
def slice(features, by=['mz', 'drift_time', 'retention_time'],
low=[0, 0, 0], high=[0, 0, 0], return_index=False):
'''
Given a feature coordinate and bounds, return a subset of the data.
Parameters
----------
features : :obj:`~pandas.DataFrame`
Input feature coordinates and intensities.
by : str or list
        Dimension(s) by which to subset the data.
low : float or list
Lower bound(s) in each dimension.
high : float or list
Upper bound(s) in each dimension.
return_index : bool
Return boolean index of subset if True.
Returns
-------
:obj:`~pandas.DataFrame`
Subset of feature coordinates and intensities.
:obj:`~numpy.array`
If `return_index` is True, boolean index of subset elements,
i.e. `features[index] = subset`.
Raises
------
ValueError
If `by`, `low`, and `high` are not the same length.
'''
# safely cast to list
by = deimos.utils.safelist(by)
low = deimos.utils.safelist(low)
high = deimos.utils.safelist(high)
# check dims
deimos.utils.check_length([by, low, high])
if features is None:
if return_index is True:
return None, None
else:
return None
# store index
rindex = features.index.values
# extend columns
cols = features.columns
cidx = [cols.get_loc(x) for x in by]
# subset by each dim
features = features.values
idx = np.full(features.shape[0], True, dtype=bool)
for i, lb, ub in zip(cidx, low, high):
idx *= (features[:, i] <= ub) & (features[:, i] >= lb)
features = features[idx]
rindex = rindex[idx]
if return_index is True:
# data found
if features.shape[0] > 0:
return | pd.DataFrame(features, index=rindex, columns=cols) | pandas.DataFrame |
import pandas as pd
import numpy as np
from collections import defaultdict
from datetime import datetime, timedelta
def mta_end_of_week(d):
''' Calculates the end of the week for a given date to conform to MTA data publication on Saturday
    d = date value
return: date
'''
return d - timedelta(days=d.weekday()) + timedelta(days=5)
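# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# Any day of a Monday-Sunday week maps to that week's Saturday, matching the
# MTA's Saturday publication schedule. 2018-05-16 is a Wednesday.
def _example_mta_end_of_week():
    d = datetime.strptime('20180516', '%Y%m%d').date()
    return mta_end_of_week(d)  # -> datetime.date(2018, 5, 19), a Saturday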
def read_mta_turnstile(start='20180501', end='20180531'):
''' Read MTA turnstile data. Calculates 4-hour bucket entries and exits for each (C/A, UNIT, SCP, STATION)
    start = start date for analysis in yyyymmdd format
    end = end date for analysis in yyyymmdd format
return pd.DataFrame (same list of columns as in the MTA CSV +
[date_time, entries_offset, exits_offset, hourly_entries, hourly_exits])
'''
first = mta_end_of_week(datetime.strptime(start, '%Y%m%d').date())
last = mta_end_of_week(datetime.strptime(end, '%Y%m%d').date())
url = 'http://web.mta.info/developers/data/nyct/turnstile/turnstile_'
df = | pd.DataFrame(columns=['C/A','UNIT','SCP','STATION','LINENAME','DIVISION','DATE','TIME','DESC','ENTRIES','EXITS']) | pandas.DataFrame |
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import linkage, dendrogram
from scipy.spatial import distance
from matplotlib import rcParams
from numpy.random import seed
seed(123)
from scipy.stats.mstats import spearmanr
from scipy.stats.mstats import pearsonr
metric = 'euclidean'
method = 'ward'
test = False
compare = False
if not test:
plt.switch_backend('agg')
# Plot from scratch
def plot_RSA(output_dir, categories, layer= None, layer_name='lstm_3', amount_sent=None):
RSA = []
start = amount_sent
# corr_method = 'Pearson'
df0= layer[0:amount_sent] #sentences from first category
df0 = pd.DataFrame.transpose(df0) #result: vector_len x amount_sent, eg, 100x1000
# Create pair-wise correlation matrix between sentences from first category and second category
print('making RSA_arr...')
for cat in range(len(categories)):
row = []
for sent in range(start,layer.shape[0],amount_sent): #sentences from second category
df = layer[sent:sent+amount_sent]
df = | pd.DataFrame.transpose(df) | pandas.DataFrame.transpose |
import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from dataset_creation.params import Params
from dataset_creation.text_cleaning import TextCleaner
class DataPreparer:
@classmethod
def create_data_set(cls):
print('Started data set creation process...')
data_dict = cls.create_data_dict()
dataset = []
print('Creating final data set...')
input_text_len_list = []
output_text_len_list = []
for hadm_id, category_dict in data_dict.items():
# get output
y = category_dict.get(Params.DISCHARGE_SUMMARY_CATEGORY_VALUE_STR)
            # check if the data point is relevant and has a discharge summary
if not y:
continue
else:
# get input
x = ''
entries = []
for category in Params.OTHER_VALUES_CATEGORY_STR_LIST:
for entry in data_dict.get(hadm_id).get(category):
entries.append(entry)
entries_sorted = sorted(entries, key=lambda x: x[1])
for entry in entries_sorted:
x += entry[0]
x += ' '
if len(x) > 1000 and len(y[0][0]) > 100:
input_text_len_list.append(len(x.split()))
output_text_len_list.append(len(y[0][0].split()))
dataset.append([str(hadm_id), x, y[0][0]])
# text length statistics
print('Input Text Length:')
print('---- Max:', np.max(input_text_len_list))
print('---- Average:', np.mean(input_text_len_list))
print('---- Min:', np.min(input_text_len_list))
print('Output Text Length:')
print('---- Max:', np.max(output_text_len_list))
print('---- Average:', np.mean(output_text_len_list))
print('---- Min:', np.min(output_text_len_list))
# plot distribution
plt.hist(x=input_text_len_list, bins=100)
plt.xlabel('Bins')
plt.ylabel('#Strings in bin')
plt.title('Distribution of Input Text Length')
plt.savefig('./input_test_length_distribution.png', dpi=400)
plt.close()
plt.hist(x=output_text_len_list, bins=100)
plt.xlabel('Bins')
plt.ylabel('#Strings in bin')
plt.title('Distribution of Output Text Length')
plt.savefig('./output_text_length_distribution.png', dpi=400)
plt.close()
# save data set
print('Saving final data set...')
df = | pd.DataFrame(dataset, columns=['hadm_id', 'input_text', 'output_text']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import requests
import logging
import pandas as pd
pd.set_option("max_colwidth", 4096)
from lxml import etree
from odoo import api, fields, models, SUPERUSER_ID, _
_logger = logging.getLogger(__name__)
class WecomServerApiError(models.Model):
_name = "wecom.service_api_error"
_description = "Wecom Server API Error"
_order = "sequence"
name = fields.Char(
"Error description",
required=True,
readonly=True,
)
code = fields.Integer(
"Error code",
required=True,
readonly=True,
)
method = fields.Char(
"Treatment method",
readonly=True,
)
sequence = fields.Integer(default=0)
def get_error_by_code(self, code):
res = self.search(
[("code", "=", code)],
limit=1,
)
return {
"code": res.code,
"name": res.name,
"method": res.method,
}
def cron_pull_global_error_code(self):
self.pull()
@api.model
def pull(self):
"""
        Use a web crawler to scrape the global error codes from the WeCom developer documentation.
        The general format of a URL is: protocol://hostname[:port]/path/[;parameters][?query]#fragment
"""
try:
_logger.info(_("Start pulling the global error code of WeCom."))
url = "https://open.work.weixin.qq.com/api/doc/90000/90139/90313"
page_text = requests.get(url=url).text
tree = etree.HTML(page_text)
lis = tree.xpath("//div[@id='js_doc_preview_content']/ul/li")
methods = []
for li in lis:
li_str = etree.tostring(li, encoding="utf-8").decode()
h5 = self.getMiddleStr(li_str, "<li>", "</h5>") + "</h5>"
code = self.getMiddleStr(h5, 'id="h5--', '"><a name=')
method_str = li_str.replace(h5, "")
method = self.getMiddleStr(method_str, "<li>", "</li>")
if "-" in code:
multiple_codes = code.split("-", 1)
for multiple_code in multiple_codes:
multiple_dic = {}
multiple_dic["code"] = multiple_code
multiple_dic["method"] = method
methods.append(multiple_dic)
else:
dic = {}
dic["code"] = code
dic["method"] = method
methods.append(dic)
table = tree.xpath("//div[@id='js_doc_preview_content']/table")
table = etree.tostring(
table[0], encoding="utf-8"
            ).decode()  # convert the first table to a string
table = table.replace("<th>错误码</th>", "<th>code</th>")
table = table.replace("<th>错误说明</th>", "<th>name</th>")
table = table.replace("<th>排查方法</th>", "<th>method</th>")
df = | pd.read_html(table, encoding="utf-8", header=0) | pandas.read_html |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_to_larger_numpy(self):
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(self, dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_construct_cast_invalid(self, dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(self, in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(self, dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(self, dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str(self):
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype=object)
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean(self):
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_frame_repr(data_missing):
df = pd.DataFrame({"A": data_missing})
result = repr(df)
expected = " A\n0 <NA>\n1 1"
assert result == expected
def test_conversions(data_missing):
# astype to object series
df = pd.DataFrame({"A": data_missing})
result = df["A"].astype("object")
expected = pd.Series(np.array([np.nan, 1], dtype=object), name="A")
tm.assert_series_equal(result, expected)
# convert to object ndarray
# we assert that we are exactly equal
# including type conversions of scalars
result = df["A"].astype("object").values
expected = np.array([pd.NA, 1], dtype=object)
tm.assert_numpy_array_equal(result, expected)
for r, e in zip(result, expected):
if pd.isnull(r):
assert pd.isnull(e)
elif is_integer(r):
assert r == e
assert is_integer(e)
else:
assert r == e
assert type(r) == type(e)
def test_integer_array_constructor():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
expected = integer_array([1, 2, 3, np.nan], dtype="int64")
tm.assert_extension_array_equal(result, expected)
msg = r".* should be .* numpy array. Use the 'integer_array' function instead"
with pytest.raises(TypeError, match=msg):
IntegerArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
IntegerArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
IntegerArray(values.astype(float), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
IntegerArray(values)
@pytest.mark.parametrize(
"a, b",
[
([1, None], [1, np.nan]),
([None], [np.nan]),
([None, np.nan], [np.nan, np.nan]),
([np.nan, np.nan], [np.nan, np.nan]),
],
)
def test_integer_array_constructor_none_is_nan(a, b):
result = integer_array(a)
expected = integer_array(b)
tm.assert_extension_array_equal(result, expected)
def test_integer_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
assert result._data is values
assert result._mask is mask
result = IntegerArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
"foo",
1,
1.0,
pd.date_range("20130101", periods=2),
np.array(["foo"]),
[[1, 2], [3, 4]],
[np.nan, {"a": 1}],
],
)
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
msg = (
r"(:?.* cannot be converted to an IntegerDtype)"
r"|(:?values must be a 1D list-like)"
)
with pytest.raises(TypeError, match=msg):
integer_array(values)
def test_to_integer_array_inferred_dtype():
# if values has dtype -> respect it
result = integer_array(np.array([1, 2], dtype="int8"))
assert result.dtype == Int8Dtype()
result = integer_array(np.array([1, 2], dtype="int32"))
assert result.dtype == Int32Dtype()
# if values have no dtype -> always int64
result = integer_array([1, 2])
assert result.dtype == Int64Dtype()
def test_to_integer_array_dtype_keyword():
result = | integer_array([1, 2], dtype="int8") | pandas.core.arrays.integer_array |
'''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-27-Da Yan
semi-automated code; it still needs some manual work. LOL, but God is so good to me.
1. There are 9 different buildings in this dataset, and each building has different rooms.
2. Each room has different window, door, AC, indoor, and outdoor info.
3. Buildings A to F were processed by hand; then I figured out that I can rename the files first and process them with code.
4. Files are renamed by type and number, such as window1, indoor1, ac1, door1, etc.
5. Buildings G, H, and I were processed automatically by the code.
6. The folders hold multiple types of data, csv and xlsx; figure out the file type, then read into pandas.
7. Concat the outdoor datetime and temperature with the AC data, then judge if the AC is on or off.
'''
import os
import glob
import string
import datetime
import pandas as pd
import matplotlib.pyplot as plt
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/processed/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/_sql/'
# generate the name of different building folders
alphabet_string = string.ascii_uppercase
alphabet_list = list(alphabet_string)
building_names = alphabet_list[:9]
''' 1. process data by folders '''
begin_time = datetime.datetime.now()
# create dataframe to store the data
combined_window = pd.DataFrame()
combined_door = pd.DataFrame()
combined_hvac = pd.DataFrame()
combined_indoor = pd.DataFrame()
combined_outdoor = pd.DataFrame()
''' process outdoor data '''
print(f'Process outdoor data')
os.chdir(data_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
outdoor_files = list(filter(lambda name: 'outdoor_building' in name, root_files))  # filter out the outdoor files
combined_outdoor = pd.concat([pd.read_csv(f) for f in outdoor_files])
''' manually processed data '''
print(f'Process manually processed data')
building_names_1 = building_names[:6]
# unit test
# i = 0
# folder_name = building_names_1[i]
for index, bld_name in enumerate(building_names_1):
print(f'Reading the data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
# combine
indoor_files = list(filter(lambda name: 'indoor' in name, root_files)) # filter out the indoor files
window_files = list(filter(lambda name: 'window' in name, root_files)) # filter out the window files
hvac_files = list(filter(lambda name: 'hvac' in name, root_files)) # filter out the ac files
door_files = list(filter(lambda name: 'door_status' in name, root_files)) # filter out the door status files
# read anc combine the files under this folder
if indoor_files: # make sure it is not empty
indoor_temp_df = pd.concat([pd.read_csv(f) for f in indoor_files])
combined_indoor = pd.concat([combined_indoor, indoor_temp_df], ignore_index=True) # concat the data
else:
pass
if window_files:
window_temp_df = pd.concat([pd.read_csv(f) for f in window_files])
combined_window = pd.concat([combined_window, window_temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
hvac_temp_df = pd.concat([pd.read_csv(f) for f in hvac_files])
combined_hvac = pd.concat([combined_hvac, hvac_temp_df], ignore_index=True) # concat the data
# print(combined_hvac.isnull().sum())
# print(index)
else:
pass
if door_files:
door_temp_df = pd.concat([pd.read_csv(f) for f in door_files])
combined_door = pd.concat([combined_door, door_temp_df], ignore_index=True) # concat the data
# print(combined_door.isnull().sum())
# print(index)
else:
pass
''' automated processing by building level '''
building_names = ['G', 'H', 'I']
building_ids = [7, 8, 9]
for index, bld_name in enumerate(building_names):
print(f'Dealing with data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
    ''' room level '''
for room_id in sub_folders:
print(f'Dealing with data under room folder {room_id}')
room_path = building_path + room_id + '/'
os.chdir(room_path) # pwd
file_names = os.listdir() # get all the file names
window_files = list(filter(lambda name: 'window' in name, file_names)) # filter out the window files
hvac_files = list(filter(lambda name: 'ac' in name, file_names)) # filter out the ac files
door_files = list(filter(lambda name: 'door' in name, file_names)) # filter out the door files
# read and combine files
if window_files:
for window_name in window_files:
                name, extension = os.path.splitext(window_name)  # split the file name into base name and extension
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status'] # rename the columns
else:
temp_df = pd.read_excel(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status']
temp_df['Window_ID'] = int(name.split('_')[0][6:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_window = pd.concat([combined_window, temp_df], ignore_index=True) # concat the data
else:
pass
if door_files:
for door_name in door_files:
                name, extension = os.path.splitext(door_name)  # split the file name into base name and extension
if extension == '.CSV': # if the file is csv file
temp_df = | pd.read_csv(door_name, usecols=[0, 1]) | pandas.read_csv |
import argparse
import os
import torch
import time
import numpy as np
import pandas as pd
import shutil
from data_utils import g_node_col, g_date_col, process_cdc_truth_from_csse, process_cdc_loc, get_all_cdc_label, read_cdc_forecast
from base_task import load_json_from
# exp_dir_template = '../Exp_us_{}_{}' # level, forecast_date
cdc_forecast_dir = '../covid19-forecast-hub/data-processed'
def load_exp_res(exp_dir, extra_columns=None):
task_dirs = os.listdir(exp_dir)
test_results = []
for task_dir in task_dirs:
task_items = task_dir.split('_')
target, horizon, model, seed = task_items[:4]
horizon = int(horizon)
if len(task_items) == 4:
seed = int(seed.lstrip('seed'))
else:
seed = '_'.join([seed.lstrip('seed')] + task_items[4:])
if model == 'gbm':
gbm_out = pd.read_csv(os.path.join(exp_dir, task_dir, 'test_out.csv'), parse_dates=[g_date_col])
test_res = gbm_out[[g_date_col, g_node_col, 'pred', 'label']].fillna(0)
else:
try:
nn_out = torch.load(os.path.join(exp_dir, task_dir, 'Output/test.out.cpt'))
except:
print(f'Warning: {os.path.join(exp_dir, task_dir)} is an incomplete task directory! ...skip...')
continue
if 'y_scale' in nn_out and nn_out['y_scale'] == 'linear':
log_scale = False
else:
log_scale = True
nn_pred = nn_out['pred'].reset_index(drop=False)
nn_pred['pred'] = np.expm1(nn_pred['val']) if log_scale else nn_pred['val']
nn_pred[g_date_col] = nn_out['dates']
nn_pred[g_node_col] = nn_out['countries'] if 'countries' in nn_out else nn_out['nodes']
nn_label = nn_out['label'].reset_index(drop=False)
nn_label['label'] = np.expm1(nn_label['val']) if log_scale else nn_label['val']
nn_label[g_date_col] = nn_out['dates']
nn_label[g_node_col] = nn_out['countries'] if 'countries' in nn_out else nn_out['nodes']
test_res = pd.merge(nn_pred, nn_label, on=[g_date_col, g_node_col])[[g_date_col, g_node_col, 'pred', 'label']]
if extra_columns is not None:
cfg = load_json_from(os.path.join(exp_dir, task_dir, 'config.json'))
for extra_col in extra_columns:
if extra_col == 'best_epoch':
test_res[extra_col] = nn_out['epoch']
else:
test_res[extra_col] = cfg[extra_col]
test_res['target'] = target
test_res['horizon'] = horizon
test_res['model'] = model
test_res['seed'] = seed
test_results.append(test_res)
exp_res = pd.concat(test_results, axis=0).sort_values(['target', 'horizon', 'model', 'seed', g_node_col]).reset_index(drop=True)
return exp_res
def merge_cdc_loc(raw_pred):
# ensure the order
raw_pred = raw_pred.sort_values([g_date_col, g_node_col, 'target', 'horizon'])
# align g_node_col with cdc location
locs = process_cdc_loc()
node2loc = dict(zip(locs[g_node_col], locs['location']))
raw_pred['location'] = raw_pred[g_node_col].map(lambda x: node2loc.get(x, pd.NA))
return raw_pred
def merge_last_cum_truth(raw_pred, forecast_date, cdc_cum_truth=None):
if 'location' not in raw_pred.columns:
raw_pred = merge_cdc_loc(raw_pred)
if cdc_cum_truth is None:
cdc_confirmed_cum_truth = process_cdc_truth_from_csse('confirmed', stat_type='cum')
cdc_deaths_cum_truth = process_cdc_truth_from_csse('deaths', stat_type='cum')
cdc_confirmed_cum_truth['target'] = 'confirmed'
cdc_deaths_cum_truth['target'] = 'deaths'
cdc_cum_truth = pd.concat([cdc_confirmed_cum_truth, cdc_deaths_cum_truth], axis=0, ignore_index=True)
# merge cdc cumulative info into forecasting results
last_date = pd.to_datetime(forecast_date) + pd.Timedelta(-1, unit='day')
last_cum_truth = cdc_cum_truth[cdc_cum_truth['date'] == last_date]
raw_pred = pd.merge(raw_pred, last_cum_truth[['location', 'target', 'value']].rename(columns={'value': 'cum_sum'}),
on=['location', 'target'], how='left')
# remove useless nodes that do not have a cdc location
# TODO: do this when training our models
useless_nodes = raw_pred[raw_pred['location'].isnull()][g_node_col].unique()
if useless_nodes.size > 0:
print(f'# useless nodes in our models {useless_nodes.size}, ...removed...')
raw_pred = raw_pred.dropna(subset=['location', 'cum_sum']).reset_index(drop=True)
return raw_pred
def transform_to_cdc_format(raw_pred, forecast_date):
if 'cum_sum' not in raw_pred.columns:
raise Exception('You should run merge_last_cum_truth before this function')
# raw_pred = merge_last_cum_truth(raw_pred, forecast_date)
# transform into CDC formats
target2tag = {
'confirmed': 'case',
'deaths': 'death',
}
cdc_results = []
for target in ['confirmed', 'deaths']:
tag = target2tag[target]
for n_week in [1, 2, 3, 4]:
horizon = n_week * 7
for stat_type in ['inc', 'cum']:
cdc_target = f'{n_week} wk ahead {stat_type} {tag}'
cdc_target_end_date = pd.to_datetime(forecast_date) + pd.Timedelta(horizon-1, unit='day')
# print(cdc_target)
cdc_res = raw_pred[(raw_pred['target'] == target) & (raw_pred['horizon'] == horizon)].reset_index(drop=True).copy()
if stat_type == 'inc':
if n_week == 1:
cdc_res['value'] = cdc_res['pred']
else:
cdc_res['value'] = cdc_res['pred'] - raw_pred[(raw_pred['target'] == target) & (raw_pred['horizon'] == horizon-7)].reset_index(drop=True)['pred']
else:
cdc_res['value'] = cdc_res['cum_sum'] + cdc_res['pred']
cdc_res = cdc_res.rename(columns={g_date_col: 'forecast_date', 'target': 'model_target'})
cdc_res['target'] = cdc_target
cdc_res['target_end_date'] = cdc_target_end_date
cdc_res['type'] = 'point'
cdc_res['quantile'] = pd.NA
cdc_results.append(cdc_res[[
'forecast_date', 'target', 'target_end_date', 'location', 'type', 'quantile', 'value'
]])
all_cdc_res = | pd.concat(cdc_results, axis=0, ignore_index=True) | pandas.concat |
import itertools
from sklearn.model_selection import train_test_split
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
import matplotlib.pyplot as plt
from sklearn import metrics
import numpy as np
import pandas as pd
import re
PATTERN = re.compile(r"((?P<days1>[1-9]\d*)D(?P<amount1>[1-9]\d*[NP])_)?((?P<days2>[1-9]\d*)D(?P<amount2>[1-9]\d*[NP])_)?(?P<noshow>[1-9]\d*[NP])?")
def cancel_parser(policy: str, nights_num):
if nights_num <= 0:
nights_num = 1
match = PATTERN.match(policy)
if match is None:
return policy
else:
noshow = match.group("noshow")
noshow = 1 if noshow is None else int(noshow[:-1])/100 if noshow[-1] == 'P' else int(noshow[:-1]) / nights_num
days1 = match.group("days1")
if days1 is None:
days1 = 0
amount1 = noshow
else:
days1 = int(days1)
amount1 = match.group("amount1")
amount1 = int(amount1[:-1])/100 if amount1[-1] == 'P' else int(amount1[:-1])/nights_num
days2 = match.group("days2")
if days2 is None:
days2 = 0
amount2 = amount1
else:
days2 = int(days2)
amount2 = match.group("amount2")
amount2 = int(amount2[:-1])/100 if amount2[-1] == 'P' else int(amount2[:-1])/nights_num
return days1, amount1, days2, amount2, noshow
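# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# "365D100P_100P" is a made-up policy code: a 100% charge from 365 days before
# check-in plus a 100% no-show charge. Percent ('P') amounts come back as
# fractions of the booking; night ('N') amounts are divided by nights_num.
def _example_cancel_parser():
    return cancel_parser("365D100P_100P", nights_num=2)  # -> (365, 1.0, 0, 1.0, 1.0)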
def agoda_preprocessor(full_data: np.ndarray):
    # fill missing cancellation datetimes with the check-in date (no recorded cancellation),
    # so the day-difference features derived below become 0 for those bookings
full_data.loc[full_data["cancellation_datetime"].isnull(), "cancellation_datetime"] = full_data["checkin_date"]
full_data['cancellation_datetime'] = pd.to_datetime(full_data["cancellation_datetime"])
features = data_preprocessor(full_data)
full_data["cancel_warning_days"] = (full_data['checkin_date'] - full_data['cancellation_datetime']).dt.days
full_data["days_cancelled_after_booking"] = (full_data["cancellation_datetime"] - full_data["booking_datetime"]).dt.days
labels = (7 <= full_data["days_cancelled_after_booking"]) & (full_data["days_cancelled_after_booking"] <= 43)
return features, np.asarray(labels).astype(int)
def load_agoda_dataset():
"""
Load Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in the following format:
- Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
    # clean the data of unrealistic records
full_data = | pd.read_csv("../datasets/agoda_cancellation_train.csv") | pandas.read_csv |
#!/usr/bin/env python
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import precision_recall_fscore_support, mean_squared_error
from collections import Counter
import math
import xgboost as xgb
import pickle
import sys, os
from acheron.helpers import model_evaluators
def is_valid_type(val):
# covers core floats, numpy floating and complex floating
if isinstance(val, (float, np.inexact)):
if math.isnan(val) or np.isnan(val):
return False
else:
return True
# covers core ints, numpy ints (signed unsigned), longs, shorts, bytes
    # TODO check for bytes later, as they do not play well with math expressions in exponential form
elif isinstance(val, (int, np.integer)):
return True
# covers core and np strings, excludes unicode
elif isinstance(val, (str, np.str_)):
if val.upper() in ['INVALID', 'NAN']:
return False
else:
return True
else:
return False
def make_mask(label_matrix, k):
"""
Takes in a label matrix and saves a mask of which labels are valid
    for each index. This allows the model to keep a sample when only a single
    column is invalid, instead of discarding the whole sample.
If doing kfold cross validation (k != 1), there must be at least k samples
in each class for that class to be considered valid
"""
mask = np.zeros((label_matrix.values.shape), dtype='bool')
invalid_classes = {}
# if any class has fewer than k samples, it is marked as invalid
for attribute in label_matrix.columns:
class_counter = Counter(label_matrix[attribute])
invalid_classes[attribute] = []
for key in class_counter:
if class_counter[key] < k:
invalid_classes[attribute].append(key)
for i, row in enumerate(label_matrix.values):
for j, col in enumerate(row):
if not is_valid_type(col):
continue
if col in invalid_classes[label_matrix.columns[j]]:
continue
mask[i,j] = True
return pd.DataFrame(data=mask, columns = label_matrix.columns,
index = label_matrix.index)
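# Minimal sketch of make_mask on a toy label matrix (sample names and classes are
# hypothetical). With k=2, the lone "R" under AMC and the 'invalid' entry under TET
# are masked out (False); every other cell stays usable (True).
def _demo_make_mask():
    toy_labels = pd.DataFrame({"AMC": ["S", "S", "R"], "TET": ["S", "S", "invalid"]},
                              index=["SAMN01", "SAMN02", "SAMN03"])
    return make_mask(toy_labels, k=2)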
def make_split(label_matrix, mask, k, samples):
"""
Takes a label matrix, splits it according to k fold cross validation
but only for valid samples. Produces a random split each time (stratified)
"""
assert(2<=k<=255) # if exceeding 255, change dtype below to uint16
split_df = pd.DataFrame(data=np.zeros(label_matrix.shape),
columns=label_matrix.columns, index = label_matrix.index, dtype='uint8')
split_df = split_df[~split_df.index.duplicated(keep='first')]
for col in label_matrix.columns:
# which labels are valid in this specific column
valid_labels = label_matrix[col].values[mask[col].values]
# matching sample name for each i in valid_labels
valid_samples = label_matrix.index.values[mask[col].values]
if len(valid_samples) == 0:
print("All samples in column "+col+" are invalid, skipping split")
continue
# in the event of duplicates, keep only the first seen instance
processed = []
        # we also need to account for only having the samples listed in /samples,
        # whereas a datasheet might have thousands of valid but extra datapoints
#seen_bool_mask = np.array([i in samples and i not in duplicates for i in valid_samples])
seen_bool_mask = []
for i in valid_samples:
if i in processed:
seen_bool_mask.append(False)
else:
processed.append(i)
if i in samples:
seen_bool_mask.append(True)
else:
seen_bool_mask.append(False)
seen_bool_mask = np.array(seen_bool_mask)
final_labels = valid_labels[seen_bool_mask]
final_samples = valid_samples[seen_bool_mask]
# at this point, we only have labels and samples that are eligible
# for machine learning
skf = StratifiedKFold(n_splits=k, shuffle=True)
num_samples = len(final_samples)
splits = enumerate(skf.split(np.zeros((num_samples,k)),final_labels))
for i, split in splits:
# pull the array of values assigned to the testing set,
# label these genomes as per the fold they belong to
for sample in final_samples[split[1]]:
split_df.at[sample,col] = i
return split_df
def load_data(dataset_name, label_name, trial, type, num_feats, k, attribute):
"""
load requested dataset, mask, and split
"""
features = pd.read_pickle("data/{}/features/{}_matrix.df".format(dataset_name, type))
labels = pd.read_pickle("data/{}/labels/{}.df".format(dataset_name,label_name))[attribute]
mask = pd.read_pickle("data/{}/features/masks/{}_{}.df".format(dataset_name,type,label_name))[attribute]
if k!=1:
split = pd.read_pickle("data/{}/splits/split{}_{}_{}_{}xCV.df".format(dataset_name,trial,type,label_name,k))
else:
split = []
features, labels = apply_mask(features, labels, mask)
return features, labels, mask, split
def train_model(features, label, model_type, num_classes):
"""
Converts feature and label matrices in a trained model
Sci-kit models are at the end, as they share a fit method
"""
# XGBoost
if model_type.upper() in ['XGB', 'XGBOOST']:
if num_classes == 2:
objective = 'binary:logistic'
else:
objective = 'multi:softmax'
        # this is probably going to be memory-hungry, so let's revisit XGBClassifier
        # if we blow past our RAM budget on this step
xgb_matrix = xgb.DMatrix(features.values, label, feature_names=features.columns)
params = {'objective':objective, 'num_class': num_classes}
#params = {'objective':objective}
booster = xgb.train(params, xgb_matrix)
return booster
# Artificial Neural Network
elif model_type.upper() in ['ANN','KERAS','TF','TENSORFLOW']:
from keras.utils import np_utils, to_categorical
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
cat_labels = to_categorical(label, num_classes)
patience = 16
early_stop = EarlyStopping(monitor='loss', patience=patience, verbose=1, min_delta=0.005, mode='auto')
reduce_LR = ReduceLROnPlateau(monitor='loss', factor= 0.1, patience=(patience/2), verbose = 1, min_delta=0.005,mode = 'auto', cooldown=0, min_lr=0)
num_feats = len(features.columns)
model = Sequential()
model.add(Dense(int(((num_feats+num_classes)/2)),activation='relu',input_dim=(num_feats)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, kernel_initializer='uniform', activation='softmax'))
if num_classes == 2:
loss = "binary_crossentropy"
else:
loss = "poisson"
model.compile(loss=loss, metrics=['accuracy'], optimizer='adam')
model.fit(features.values, cat_labels, epochs=100, verbose=1, callbacks=[early_stop, reduce_LR])
return model
# Support Vector Classifier
# https://scikit-learn.org/stable/modules/svm.html#classification
if model_type.upper() in ['SVC', 'SVM']:
from sklearn import svm
model = svm.SVC()
# Support Vector Regressor
# https://scikit-learn.org/stable/modules/svm.html#regression
elif model_type.upper() in ['SVR']:
from sklearn import svm
model = svm.SVR()
# Stochastic Gradient Descent Classifier
# https://scikit-learn.org/stable/modules/sgd.html#classification
elif model_type.upper() in ['SGDC']:
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(loss="hinge", penalty="l2", max_iter=25)
# Perceptron
# https://scikit-learn.org/stable/modules/linear_model.html#perceptron
elif model_type.upper() in ['PERC']:
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", penalty=None)
# Passive Aggressive Algorithms
# https://scikit-learn.org/stable/modules/linear_model.html#passive-aggressive-algorithms
# https://www.geeksforgeeks.org/passive-aggressive-classifiers
elif model_type.upper() in ['PAC']:
from sklearn.linear_model import PassiveAggressiveClassifier
model = PassiveAggressiveClassifier(max_iter=100)
# Nearest Neighbours Classifier
# https://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbors-classification
elif model_type.upper() in ['NNC']:
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=3)
# Nearest Neighbours Regressor
# https://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbors-regression
elif model_type.upper() in ['NNR']:
from sklearn.neighbors import KNeighborsRegressor
model = KNeighborsRegressor(n_neighbors=3)
# Gaussian Naive Bayes
# https://scikit-learn.org/stable/modules/naive_bayes.html#gaussian-naive-bayes
elif model_type.upper() in ['GNB']:
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
# Multinomial Naive Bayes
# https://scikit-learn.org/stable/modules/naive_bayes.html#multinomial-naive-bayes
elif model_type.upper() in ['MNB']:
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB()
# Categorical Naive Bayes
# https://scikit-learn.org/stable/modules/naive_bayes.html#categorical-naive-bayes
elif model_type.upper() in ['CNB']:
from sklearn.naive_bayes import CategoricalNB
model = CategoricalNB()
# Decision Tree Classifier
# https://scikit-learn.org/stable/modules/tree.html#classification
elif model_type.upper() in ['DTC']:
from sklearn import tree
model = tree.DecisionTreeClassifier()
# Decision Tree Regressor
# https://scikit-learn.org/stable/modules/tree.html#regression
elif model_type.upper() in ['DTR']:
from sklearn import tree
model = tree.DecisionTreeRegressor()
# AdaBoost
# https://scikit-learn.org/stable/modules/ensemble.html#adaboost
elif model_type.upper() in ['ADA']:
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier()
# Gradient Boosted Decision Trees
# https://scikit-learn.org/stable/modules/ensemble.html#gradient-tree-boosting
elif model_type.upper() in ['GBDT']:
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier()
# Multi-layer Perceptron Classifier
# https://scikit-learn.org/stable/modules/neural_networks_supervised.html#multi-layer-perceptron
elif model_type.upper() in ['MLPC']:
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
else:
raise Exception("model type {} not defined".format(model_type))
model.fit(features.values, label)
return model
def train_hyper_model(x_train, y_train, x_val, y_val, model_type, num_classes):
"""
Trains a hyperparameter optimized model
"""
from acheron.workflows import hyp
best_model, best_params = hyp.get_best(x_train, y_train, x_val, y_val, model_type, num_classes)
return best_model, best_params
"""
Unsure if saving required, to do without
import time
from random import seed
from random import random
seed(time.time())
test_id = random()
save_path = "data/hyp_data/{}/".format(test_id)
os.makedirs(save_path, exist_ok=False)
for i, data_chunk in enumerate([x_train, y_train, x_val, y_val]):
if i%2 == 0:
data_chunk.to_pickle("{}{}.pkl".format(save_path,i))
else:
np.save("{}{}.npy".format(save_path,i),data_chunk)
trials = Trials()
# https://towardsdatascience.com/an-example-of-hyperparameter-optimization-on-xgboost-lightgbm-and-catboost-using-hyperopt-12bc41a271e
# Search Space Subject to Change!!
if model_type.upper() in ['XGB','XGBOOST']:
search_params = {
'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
'max_depth': hp.choice('max_depth', np.arange(1, 8, 1, dtype=int)),
'min_child_weight': hp.choice('min_child_weight', np.arange(1, 8, 1, dtype=int)),
'colsample_bytree': hp.choice('colsample_bytree', np.arange(0.3, 0.8, 0.1)),
'subsample': hp.uniform('subsample', 0.8, 1),
'num_class': num_classes,
'test_id': test_id
}
best_index = fmin(
fn=hyp.xgboost_objective, space=search_params,
algo=tpe.suggest, max_evals=100, trials=trials)
best_params = space_eval(search_params, best_index)
if num_classes == 2:
best_params['objective'] = 'binary:logistic'
else:
best_params['objective'] = 'multi:softmax'
best_params['n_estimators'] = 10
best_params['num_class'] = num_classes
xgb_matrix = xgb.DMatrix(x_train.values, y_train, feature_names=x_train.columns)
booster = xgb.train(best_params, xgb_matrix)
return booster, best_params
elif model_type.upper() in ['ANN','KERAS','TF','TENSORFLOW']:
from acheron.workflows import hyp
best_run, best_model = optim.minimize(
model=hyp.create_model,
data=load_hyp_data,
algo=tpe.suggest,
max_evals=10,
trials=Trials(),
keep_temp=True)
return best_model, best_run
"""
#else:
#raise Exception("model type {} not defined".format(model_type))
"""
# Minimal Cost-Complexity Pruning
# https://scikit-learn.org/stable/modules/tree.html#minimal-cost-complexity-pruning
elif model_type.upper() in ['MCCP']:
from sklearn import tree
model = tree.DecisionTreeClassifier()
path = model.cost_complexity_pruning_path(features.values, labels)
ccp_alphas, impurities = path.ccp_alphas, path.impurities
models = []
for ccp_alpha in ccp_alphas:
            clf = tree.DecisionTreeClassifier(ccp_alpha=ccp_alpha)
            clf.fit(features.values, labels)
            models.append(clf)
# now use validation set to see which model did best, use that alpha to train final model
"""
def predict(model, features, model_type):
"""
Takes a model and a feature set, returns an label like array of predictions
"""
if model_type.upper() in ['XGB', 'XGBOOST']:
xgb_matrix = xgb.DMatrix(features.values, feature_names = features.columns)
return [round(i) for i in model.predict(xgb_matrix, validate_features=True)]
elif model_type.upper() in ['ANN','KERAS','TF','TENSORFLOW']:
# This will be in categorical form, need to decode it
prediction = model.predict_classes(features)
#return np.argmax(prediction, axis=1)
return prediction
else:
try:
return [round(i) for i in model.predict(features)]
except:
raise Exception("model type {} not defined".format(model_type))
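# Minimal sketch chaining train_model and predict with a scikit-learn model type
# ('GNB') on a synthetic binary problem; the feature values and column names below
# are fabricated for illustration only.
def _demo_train_and_predict():
    toy_features = pd.DataFrame(np.random.rand(20, 4),
                                columns=["feat%d" % i for i in range(4)])
    toy_labels = np.random.randint(0, 2, size=20)
    model = train_model(toy_features, toy_labels, "GNB", num_classes=2)
    return predict(model, toy_features, "GNB")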
def evaluate_model(predicted, actual, model_type, dilutions, attribute, encoder):
"""
Evaluates how well a model did (accuracy)
For mic modules, also reports off-by-one accuracy and error rates
Takes encoded class labels (0,1,2) not decoded values (2,4,8,16)
"""
# this df will eventually get all info about the test
direct_accuracy = np.sum([predicted[i]==actual[i] for i in range(len(predicted))])/len(predicted)
dilutional_accuracies = {}
find_errors = False
if len(dilutions) > 0:
find_errors = True
for dilution in dilutions:
total = 0
correct = 0
for i in range(len(predicted)):
total +=1
if abs(predicted[i]-actual[i]) <= dilution:
correct +=1
dilutional_accuracies[dilution] = correct/total
data = [len(predicted),direct_accuracy]
columns = ["Supports", "Accuracy"]
for dilution in dilutions:
if str(dilution) == '0':
continue
else:
data.append(dilutional_accuracies[dilution])
columns.append("Within {} Dilution".format(dilution))
if find_errors:
decoder = {v:k for k,v in encoder.items()}
pred_decoded = [decoder[i] for i in predicted]
act_decoded = [decoder[i] for i in actual]
errors = [model_evaluators.find_error_type(i[0],i[1], attribute) for i in zip(pred_decoded, act_decoded)]
error_counts = Counter(errors)
error_types = ["Very Major Error", "Major Error", "Non Major Error", "Correct"]
total_errors = 0
for error_type in error_types:
total_errors += error_counts[error_type]
percent = error_counts[error_type]/len(predicted)
data.append(percent)
columns.append(error_type)
try:
assert len(predicted) == total_errors
except:
print('Number of Errors+Correct does not equal number of predictions')
raise
results_df = pd.DataFrame(data=[data], columns=columns)
return results_df
def mean_summaries(summaries):
"""
Takes a list of model summaries
averages them appropriately, relevant to number of supports
"""
try:
indx = summaries[0].index[0]
except:
indx = 0
mean_df = pd.DataFrame(columns=summaries[0].columns, index=[indx])
total_supports = 0
proportion = {}
for summary in summaries:
num_sups = summary['Supports'][0]
total_supports += num_sups
for col in summary.columns:
if col != "Supports":
if col in proportion.keys():
proportion[col] += num_sups*summary[col][0]
else:
proportion[col] = num_sups*summary[col][0]
mean_df.loc[indx,'Supports'] = total_supports
for k,v in proportion.items():
mean_df.loc[indx, k] = v/total_supports
return mean_df
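# Minimal sketch of mean_summaries with two toy fold summaries (numbers are
# illustrative): the support-weighted mean accuracy of (10 supports @ 0.8) and
# (30 supports @ 0.6) works out to (10*0.8 + 30*0.6) / 40 = 0.65.
def _demo_mean_summaries():
    fold1 = pd.DataFrame([[10, 0.8]], columns=["Supports", "Accuracy"])
    fold2 = pd.DataFrame([[30, 0.6]], columns=["Supports", "Accuracy"])
    return mean_summaries([fold1, fold2])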
def mean_prec_recall(prec_recall_dfs):
"""
Takes a list of precision_recall_fscore_support dataframes
Returns the mean df based on proportion of supports
"""
indeces = prec_recall_dfs[0].index
done_rows = []
for indx in indeces:
rows = [i[i.index == indx] for i in prec_recall_dfs]
done_rows.append(mean_summaries(rows))
return pd.concat(done_rows)
def apply_mask(features, labels, mask):
"""
Takes in a pandas dataframe or series with a mask
and returns only valid samples
Mask series looks like:
AMC
BioSample
SAMN00000001 False
SAMN00000002 True
"""
# its important to note that the mask is for everything in the label df, but
# when we mask the features, that df might not have it
# therefore we reduce the mask to samples that are seen
if isinstance(features, pd.DataFrame):
seen = list(features.index)
skip_nan_check = False
elif isinstance(features, list):
seen = features
skip_nan_check = True
else:
        raise Exception("Needs a list of features or a dataframe with a declared index, not {}".format(type(features)))
mask = mask[[i in seen for i in mask.index]]
labels = labels[[i in seen for i in labels.index]]
# prior to reindexing, we need to make sure there are no duplicates
mask = mask[~mask.index.duplicated(keep='first')]
labels = labels[~labels.index.duplicated(keep='first')]
# reorder dataframes to make sure they are in the same order as the features
mask = mask.reindex(seen)
labels = labels.reindex(seen)
# remove samples that we dont have data for
# (these appear as nans in the mask)
if not skip_nan_check:
try:
mask = pd.Series(index = mask.index,
data = [False if np.isnan(i) else i for i in mask])
except:
for i in mask:
try:
np.isnan(i)
except:
print("{} cannot be checked if isnan".format(i))
raise
# double check that the mask biosample order matches the feature biosample order
for i, biosample in enumerate(mask.index):
assert biosample == seen[i]
if isinstance(features, pd.Series) or isinstance(features, pd.DataFrame):
labels = labels[list(mask)]
features = features[list(mask)]
elif isinstance(features, list):
labels = labels[list(mask)]
features = pd.Series(features)[list(mask)]
else:
raise Exception("Masking of type {} is not defined".format(type(features)))
return features, labels
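# Minimal sketch of apply_mask with toy inputs (biosample IDs are hypothetical):
# SAMN0A is masked out, so only the SAMN0B row of the features and labels survives.
def _demo_apply_mask():
    feats = pd.DataFrame(np.ones((2, 3)), index=["SAMN0A", "SAMN0B"])
    labels = pd.Series(["S", "R"], index=["SAMN0A", "SAMN0B"])
    mask = pd.Series([False, True], index=["SAMN0A", "SAMN0B"])
    return apply_mask(feats, labels, mask)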
def prefilter(metric, cutoff, train, trial, type, label, attribute, fold, cv):
"""
Reads variance from prerecorded variance matrices
Pulls a list of features to be kept to be passed into select_features()
pre_selection parameter
When metric == p_val, set cutoff to a decimal where all features equal or more equivalent are returned.
This means that the number of returned features will vary.
when metric == f_val, set cutoff to how many features you want returned.
i.e. a cutoff of 1000 will return the 1000 features with the most variance.
"""
import operator
import heapq
ram_DENOM = 5 # IF YOU CHANGE THIS, CHANGE IN acheron/workflows/supervised_model.py
var_df = []
    for slice in range(ram_DENOM):
        var_path = "data/{}/variance/slice{}_of_{}_trial={}_type={}_label={}_attribute={}_fold={}_of_{}xCV.df".format(
            train, slice+1, ram_DENOM, trial, type, label, attribute, fold, cv)
        if slice == 0:
            var_df = pd.read_pickle(var_path)
        elif slice >= ram_DENOM+1:
            raise Exception("slice number out of bounds for ram_DENOM")
else:
var_split = | pd.read_pickle(var_path) | pandas.read_pickle |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import scipy.stats as stats
import datetime
date_types = (
pd.Timestamp,
pd.DatetimeIndex,
pd.Period,
pd.PeriodIndex,
datetime.datetime,
datetime.time
)
_isdate = lambda x: isinstance(x, date_types)
SPAN = 2 / 3.
ALPHA = 0.05 # significance level for confidence interval
def _snakify(txt):
txt = txt.strip().lower()
return '_'.join(txt.split())
def _plot_friendly(value):
if not isinstance(value, (np.ndarray, pd.Series)):
value = pd.Series(value)
return value
def lm(x, y, alpha=ALPHA):
"fits an OLS from statsmodels. returns tuple."
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
X = sm.add_constant(x)
fit = sm.OLS(y, X).fit()
prstd, iv_l, iv_u = wls_prediction_std(fit)
_, summary_values, summary_names = summary_table(fit, alpha=alpha)
df = pd.DataFrame(summary_values, columns=map(_snakify, summary_names))
# TODO: indexing w/ data frame is messing everything up
fittedvalues = df['predicted_value'].values
predict_mean_ci_low = df['mean_ci_95%_low'].values
predict_mean_ci_upp = df['mean_ci_95%_upp'].values
predict_ci_low = df['predict_ci_95%_low'].values
predict_ci_upp = df['predict_ci_95%_upp'].values
if x_is_date:
x = [pd.Timestamp.fromordinal(int(i)) for i in x]
return x, fittedvalues, predict_mean_ci_low, predict_mean_ci_upp
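# Minimal sketch of lm() on a small synthetic linear trend (data fabricated for
# illustration); it returns the x values, the OLS fitted values, and the lower/upper
# bounds of the 95% confidence band around the mean prediction.
def _demo_lm():
    x = pd.Series(np.arange(10, dtype=float))
    y = pd.Series(2.0 * x + 1.0 + np.random.normal(scale=0.1, size=10))
    return lm(x, y)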
def lowess(x, y, span=SPAN):
"returns y-values estimated using the lowess function in statsmodels."
"""
for more see
statsmodels.nonparametric.smoothers_lowess.lowess
"""
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
result = smlowess(np.array(y), np.array(x), frac=span)
x = pd.Series(result[::,0])
y = pd.Series(result[::,1])
lower, upper = stats.t.interval(span, len(x), loc=0, scale=2)
std = np.std(y)
y1 = | pd.Series(lower * std + y) | pandas.Series |
"""
Module with classes and methods to perform kriging of elements (and at some point exploit the potential field to
choose the directions of the variograms)
Tested on Ubuntu 16
Created on 1/5/2017
@author: <NAME>
"""
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
import pymc3 as pm
import numpy as np
import pandas as pn
from bokeh.io import show
import bokeh.layouts as bl
import bokeh.plotting as bp
def choose_lithology_elements(df, litho, elem=None, coord = True):
"""
litho(str): Name of the lithology-domain
elem(list): List of strings with elements you want to analyze
"""
    # Choosing just the requested lithology domain
if elem is not None:
if coord:
domain = df[df['Lithology'] == litho][np.append(['X', 'Y', 'Z'], elem)]
else:
domain = df[df['Lithology'] == litho][elem]
# Drop negative values
domain = domain[(domain[elem] > 0).all(1)]
else:
domain = df[df['Lithology'] == litho][['X', 'Y', 'Z']]
return domain
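# Minimal sketch of choose_lithology_elements on a toy table (lithology names,
# element column and values are made up): only the 'opx' row is returned, together
# with its X/Y/Z coordinates and the requested element column.
def _demo_choose_lithology_elements():
    toy = pn.DataFrame({'X': [0., 1.], 'Y': [0., 1.], 'Z': [0., 1.],
                        'Lithology': ['opx', 'cpx'], 'MgO': [40.1, 38.2]})
    return choose_lithology_elements(toy, 'opx', elem=['MgO'])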
def select_segmented_grid(df, litho, grid, block):
block = np.squeeze(block)
assert grid.shape[0] == block.shape[0], 'The grid you want to use for kriging and the grid used for the layers ' \
'segmentation are not the same'
litho_num = df['Lithology Number'][df["Lithology"] == litho].iloc[0]
segm_grid = grid[block == litho_num]
return segm_grid
def transform_data(df_o, n_comp=1, log10=False):
"""
    Method to improve the normality of the input data before performing kriging
Args:
df_o: Dataframe with the data to interpolate
n_comp: Number of component in case of multimodal data
log10 (bool): If true return the log in base 10 of the properties:
Returns:
pandas.core.frame.DataFrame: Data frame with the transformed data
"""
import copy
df = copy.deepcopy(df_o)
    # Take the log to better approximate normal distributions
if log10:
print('computing log')
df[df.columns.difference(['X', 'Y', 'Z'])] = np.log10(df[df.columns.difference(['X', 'Y', 'Z'])])
# Finding n modes in the data
if n_comp > 1:
from sklearn import mixture
gmm = mixture.GaussianMixture(n_components=n_comp,
covariance_type='full').fit(df[df.columns.difference(['X', 'Y', 'Z'])])
# Adding the categories to the pandas frame
labels_all = gmm.predict(df[df.columns.difference(['X', 'Y', 'Z'])])
df['cluster'] = labels_all
return df
def theano_sed():
"""
    Function to create a theano function to compute the euclidean distances efficiently
Returns:
theano.compile.function_module.Function: Compiled function
"""
theano.config.compute_test_value = "ignore"
# Set symbolic variable as matrix (with the XYZ coords)
coord_T_x1 = T.dmatrix()
coord_T_x2 = T.dmatrix()
# Euclidian distances function
def squared_euclidean_distances(x_1, x_2):
sqd = T.sqrt(T.maximum(
(x_1 ** 2).sum(1).reshape((x_1.shape[0], 1)) +
(x_2 ** 2).sum(1).reshape((1, x_2.shape[0])) -
2 * x_1.dot(x_2.T), 0
))
return sqd
# Compiling function
f = theano.function([coord_T_x1, coord_T_x2],
squared_euclidean_distances(coord_T_x1, coord_T_x2),
allow_input_downcast=False)
return f
# This is extremely inefficient. Try to vectorize it in theano; a ~100x gain is possible.
def compute_variogram(df, properties, euclidian_distances, tol=10, lags=np.logspace(0, 2.5, 100), plot=[]):
"""
Compute the experimental variogram and cross variogram for a par of properties
Args:
df (pandas.core.frame.DataFrame): Dataframe with the properties and coordinates used in the experimental
variogram computation
properties (list): List of the two properties to compute the semivariogram.
        euclidian_distances (numpy.array): Precomputed euclidean distances
tol (float): Tolerance
lags (list): List of lags to compute the experimental variogram
plot (bool): If true plot the experimental variogram after computed
Returns:
        list: semivariance or cross-semivariance
"""
# Tiling the properties to a square matrix
element = (df[properties[0]].as_matrix().reshape(-1, 1) -
np.tile(df[properties[1]], (df[properties[1]].shape[0], 1))) ** 2
# Semivariance computation
semivariance_lag = []
# Looping every lag to compute the semivariance
for i in lags:
# Selecting the points at the given lag and tolerance
points_at_lag = ((euclidian_distances > i - tol) * (euclidian_distances < i + tol))
# Extracting the values of the properties of the selected lags
var_points = element[points_at_lag]
# Appending the semivariance
semivariance_lag = np.append(semivariance_lag, np.mean(var_points) / 2)
if "experimental" in plot:
        # Visualization of the experimental variogram
plt.plot(lags, semivariance_lag, 'o')
return semivariance_lag
def exp_lags(max_range, exp=2, n_lags=100):
"""
    Function to create exponentially spaced lags, for cases where log10 spacing gives too much
    weight to the smaller lags
Args:
max_range(float): Maximum distance
exp (float): Exponential degree
n_lags (int): Number of lags
Returns:
list: lags
"""
lags = np.empty(0)
for i in range(n_lags):
lags = np.append(lags, i ** exp)
lags = lags / lags.max() * max_range
return lags
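# Worked example of exp_lags (values follow directly from the definition above): with
# exp=2 and n_lags=5 the raw spacings are [0, 1, 4, 9, 16], which rescale to the
# requested maximum range as [0, 6.25, 25, 56.25, 100].
def _demo_exp_lags():
    return exp_lags(max_range=100, exp=2, n_lags=5)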
def compute_crossvariogram(df, properties_names, euclidian_distances=None, **kwargs):
"""
Compute the experimental crossvariogram of all properties given
Args:
df (pandas.core.frame.DataFrame): Dataframe with the properties and coordinates used in the experimental
variogram computation
properties_names (list str): List of strings with the properties to compute
euclidian_distances (numpy.array): Precomputed euclidian distances. If None they are computed inplace
Keyword Args:
- lag_exp: Degree of the exponential. If None log10
- lag_range: Maximum distance to compute a lag
- n_lags: Number of lags
Returns:
pandas.core.frame.DataFrame: Every experimental cross-variogram
"""
lag_exp = kwargs.get('lag_exp', None)
lag_range = kwargs.get('lag_range', 500)
n_lags = kwargs.get('n_lags', 100)
# Choosing the lag array
if lag_exp is not None:
lags = exp_lags(lag_range, lag_exp, n_lags)
else:
lags = np.logspace(0, np.log10(lag_range), n_lags)
# Compute euclidian distance
    if euclidian_distances is None:
euclidian_distances = theano_sed()(df[['X', 'Y', 'Z']], df[['X', 'Y', 'Z']])
# Init dataframe to store the results
experimental_variograms_frame = | pn.DataFrame() | pandas.DataFrame |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
        # tz mismatch on tz-aware data raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
            # passing tz should result in DatetimeIndex, then mismatch raises
            # TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
date_range(start='1/1/2000', periods='foo', freq='D')
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
pytest.raises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')
pytest.raises(ValueError, date_range, start='2011-01-01',
freq='b')
pytest.raises(ValueError, date_range, end='2011-01-01',
freq='B')
pytest.raises(ValueError, date_range, periods=10, freq='D')
@pytest.mark.parametrize('freq', ['AS', 'W-SUN'])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
idx = date_range('2013-01-01T00:00:00-05:00',
'2016-01-01T23:59:59-05:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013-01-01T00:00:00+09:00',
'2016-01-01T23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',
freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013/1/1 0:00:00+9:00',
'2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
expected = DatetimeIndex(['2013-01-01', '2013-01-02']
).tz_localize('US/Eastern')
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
tz='US/Eastern')
tm.assert_index_equal(idx, expected)
# if we already have a tz and its not the same, then raise
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
pytest.raises(ValueError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns]'))
# this is effectively trying to convert tz's
pytest.raises(TypeError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns, CET]'))
pytest.raises(ValueError,
lambda: DatetimeIndex(
idx, tz='CET',
dtype='datetime64[ns, US/Eastern]'))
result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]')
tm.assert_index_equal(idx, result)
def test_constructor_name(self):
idx = date_range(start='2000-01-01', periods=1, freq='A',
name='TEST')
assert idx.name == 'TEST'
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
assert idx.nanosecond[0] == t1.nanosecond
def test_disallow_setting_tz(self):
# GH 3746
dti = DatetimeIndex(['2010'], tz='UTC')
with pytest.raises(AttributeError):
dti.tz = pytz.timezone('US/Pacific')
@pytest.mark.parametrize('tz', [
None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'),
Timestamp('2000', tz='America/Los_Angeles').tz])
def test_constructor_start_end_with_tz(self, tz):
# GH 18595
start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles')
end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles')
result = date_range(freq='D', start=start, end=end, tz=tz)
expected = DatetimeIndex(['2013-01-01 06:00:00',
'2013-01-02 06:00:00'],
tz='America/Los_Angeles')
tm.assert_index_equal(result, expected)
# Especially assert that the timezone is consistent for pytz
assert pytz.timezone('America/Los_Angeles') is result.tz
@pytest.mark.parametrize('tz', ['US/Pacific', 'US/Eastern', 'Asia/Tokyo'])
def test_constructor_with_non_normalized_pytz(self, tz):
# GH 18595
non_norm_tz = Timestamp('2010', tz=tz).tz
result = DatetimeIndex(['2010'], tz=non_norm_tz)
assert pytz.timezone(tz) is result.tz
def test_constructor_timestamp_near_dst(self):
# GH 20854
ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'),
Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')]
result = DatetimeIndex(ts)
expected = DatetimeIndex([ts[0].to_pydatetime(),
ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
# TODO(GH-24559): Remove the xfail for the tz-aware case.
@pytest.mark.parametrize('klass', [Index, DatetimeIndex])
@pytest.mark.parametrize('box', [
np.array, partial(np.array, dtype=object), list])
@pytest.mark.parametrize('tz, dtype', [
pytest.param('US/Pacific', 'datetime64[ns, US/Pacific]',
marks=[pytest.mark.xfail(),
pytest.mark.filterwarnings(
"ignore:\\n Passing:FutureWarning")]),
[None, 'datetime64[ns]'],
])
def test_constructor_with_int_tz(self, klass, box, tz, dtype):
# GH 20997, 20964
ts = Timestamp('2018-01-01', tz=tz)
result = klass(box([ts.value]), dtype=dtype)
expected = klass([ts])
assert result == expected
# This is the desired future behavior
@pytest.mark.xfail(reason="Future behavior", strict=False)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
def test_construction_int_rountrip(self, tz_naive_fixture):
# GH 12619
# TODO(GH-24559): Remove xfail
tz = tz_naive_fixture
result = 1293858000000000000
expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0]
assert result == expected
def test_construction_from_replaced_timestamps_with_dst(self):
# GH 18785
index = pd.date_range(pd.Timestamp(2000, 1, 1),
pd.Timestamp(2005, 1, 1),
freq='MS', tz='Australia/Melbourne')
test = pd.DataFrame({'data': range(len(index))}, index=index)
test = test.resample('Y').mean()
result = pd.DatetimeIndex([x.replace(month=6, day=1)
for x in test.index])
expected = pd.DatetimeIndex(['2000-06-01 00:00:00',
'2001-06-01 00:00:00',
'2002-06-01 00:00:00',
'2003-06-01 00:00:00',
'2004-06-01 00:00:00',
'2005-06-01 00:00:00'],
tz='Australia/Melbourne')
tm.assert_index_equal(result, expected)
def test_construction_with_tz_and_tz_aware_dti(self):
# GH 23579
dti = date_range('2016-01-01', periods=3, tz='US/Central')
with pytest.raises(TypeError):
DatetimeIndex(dti, tz='Asia/Tokyo')
def test_construction_with_nat_and_tzlocal(self):
tz = dateutil.tz.tzlocal()
result = DatetimeIndex(['2018', 'NaT'], tz=tz)
expected = DatetimeIndex([Timestamp('2018', tz=tz), pd.NaT])
tm.assert_index_equal(result, expected)
class TestTimeSeries(object):
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
assert rng.freq == rng2.freq
def test_dti_constructor_years_only(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 6961
rng1 = date_range('2014', '2015', freq='M', tz=tz)
expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz)
rng2 = date_range('2014', '2015', freq='MS', tz=tz)
expected2 = date_range('2014-01-01', '2015-01-01', freq='MS', tz=tz)
rng3 = date_range('2014', '2020', freq='A', tz=tz)
expected3 = date_range('2014-12-31', '2019-12-31', freq='A', tz=tz)
rng4 = date_range('2014', '2020', freq='AS', tz=tz)
expected4 = date_range('2014-01-01', '2020-01-01', freq='AS', tz=tz)
for rng, expected in [(rng1, expected1), (rng2, expected2),
(rng3, expected3), (rng4, expected4)]:
tm.assert_index_equal(rng, expected)
def test_dti_constructor_small_int(self, any_int_dtype):
# see gh-13721
exp = DatetimeIndex(['1970-01-01 00:00:00.00000000',
'1970-01-01 00:00:00.00000001',
'1970-01-01 00:00:00.00000002'])
arr = np.array([0, 10, 20], dtype=any_int_dtype)
tm.assert_index_equal(DatetimeIndex(arr), exp)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
assert rng[0].second == 1
def test_is_(self):
dti = date_range(start='1/1/2005', end='12/1/2005', freq='M')
assert dti.is_(dti)
assert dti.is_(dti.view())
assert not dti.is_(dti.copy())
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
assert (idx.values == conversion.ensure_datetime64ns(arr)).all()
def test_constructor_int64_nocopy(self):
# GH#1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
assert (index.asi8[50:100] == -1).all()
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
assert (index.asi8[50:100] != -1).all()
@pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B', 'BH',
'T', 'S', 'L', 'U', 'H', 'N', 'C'])
def test_from_freq_recreate_from_data(self, freq):
org = date_range(start='2001/02/01 09:00', freq=freq, periods=1)
idx = DatetimeIndex(org, freq=freq)
tm.assert_index_equal(idx, org)
org = date_range(start='2001/02/01 09:00', freq=freq,
tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=freq, tz='US/Pacific')
tm.assert_index_equal(idx, org)
def test_datetimeindex_constructor_misc(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
pytest.raises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = | DatetimeIndex(arr) | pandas.DatetimeIndex |
from utils import *
import time, copy, os, glob, csv, ast
import pandas as pd
import numpy as np
from collections import defaultdict
from config import parameters
from PatternHandler import PatternHandler
from DependencyGraphHandler import DependencyGraphHandler
from SubsetHandler import SubsetHandler
from sklearn.feature_selection import mutual_info_classif
from sklearn.model_selection import KFold
from tqdm import tqdm
tqdm.pandas()
from pandarallel import pandarallel
pandarallel.initialize(nb_workers=parameters.num_cpus, progress_bar=True)
domains = parameters.domains
pattern_types = parameters.pattern_types
allow_f1_decrease_count = parameters.allow_f1_decrease_count
config_option = parameters.config_option
min_pattern_count = parameters.min_pattern_count
max_pick_count = parameters.max_pick_count
min_pattern_f1 = parameters.min_pattern_f1
data_filepath = parameters.data_filepath
lexicon_filepath = parameters.lexicon_filepath
output_time_txt_filepath = parameters.output_time_txt_filepath
output_pattern_csv_filepath = parameters.output_pattern_csv_filepath
output_error_csv_filepath = parameters.output_error_csv_filepath
output_target_log_csv_filepath = parameters.output_target_log_csv_filepath
output_raw_df_pkl_filepath_ = parameters.output_raw_df_pkl_filepath
output_training_test_dfs_pkl_filepath = parameters.output_training_test_dfs_pkl_filepath
output_pattern_counter_pkl_filepath = parameters.output_pattern_counter_pkl_filepath
output_targets_dir = parameters.output_targets_dir
output_targets_concat_csv_filepath = parameters.output_targets_concat_csv_filepath
output_pattern_quality_estimation_csv_filepath = parameters.output_pattern_quality_estimation_csv_filepath
output_subset_selection_log_filepath = parameters.output_subset_selection_log_filepath
output_subset_pkl_filepath = parameters.output_subset_pkl_filepath
output_target_extraction_report_csv_filepath = parameters.output_target_extraction_report_csv_filepath
output_final_report_csv_filepath = parameters.output_final_report_csv_filepath
def match_opinion_words(content, opinion_word_lexicon):
opinion_words = []
for opinion in opinion_word_lexicon:
for token in content.split():
if token == opinion: opinion_words.append(token)
return list(set(opinion_words))
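# Minimal sketch of match_opinion_words on a toy sentence and lexicon (strings are
# illustrative, not from the review corpus); only exact whitespace-delimited token
# matches are returned, deduplicated via set().
def _demo_match_opinion_words():
    lexicon = ["great", "terrible", "good"]
    return match_opinion_words("the battery life is great and the screen looks great", lexicon)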
def save_extracted_pattern_results(domain, k, pattern_type, pattern_counter, err_list):
pattern_list = [tup for tup in pattern_counter.items()]
pattern_df = pd.DataFrame(pattern_list, columns =['pattern', 'count'])
filepath = output_pattern_csv_filepath % (domain, k, pattern_type, len(pattern_df))
pattern_df.to_csv(filepath, index = False, encoding='utf-8-sig')
print('Created %s' % filepath)
err_df = pd.DataFrame(err_list, columns =['content', 'current_opinion_word', 'current_target_word', 'parse_error', 'opinion_words', 'targets', 'raw_targets'])
filepath = output_error_csv_filepath % (domain, k, pattern_type, len(err_df[err_df['parse_error']==True]), len(err_df))
err_df.to_csv(filepath, index = False, encoding='utf-8-sig')
print('Created %s' % filepath)
def pattern_extraction(domain, k, pattern_type, df, pattern_handler, dependency_handler):
print('[%s k=%d %s]' % (domain, k, pattern_type))
pattern_counter, err_list = defaultdict(int), list()
if pattern_type == 'ot': pattern_handler.extract_patterns_ot(df, pattern_counter, err_list, dependency_handler)
elif pattern_type == 'tt': pattern_handler.extract_patterns_tt(df, pattern_counter, err_list, dependency_handler)
save_extracted_pattern_results(domain, k, pattern_type, pattern_counter, err_list)
return pattern_counter
def merge_dfs(data_filepaths):
dfs = []
for data_filepath in data_filepaths:
df = pd.read_csv(data_filepath)
dfs.append(df)
return pd.concat(dfs, ignore_index=True)
def pattern_quality_estimation(domain, k, pattern_type, original_df, pattern_counter, pattern_handler, dependency_handler):
print('Processing pattern_quality_estimation for [%s] (# of patterns = %d)..' % (domain, len(pattern_counter)))
idx = 0
for one_flattened_dep_rels, pattern_count in sorted(pattern_counter.items(), key=lambda x: x[-1], reverse=True):
idx += 1
filepath = output_target_log_csv_filepath % (domain, k, pattern_type, pattern_count, one_flattened_dep_rels)
if os.path.exists(filepath): continue
print('[%d/%d]Evaluating quality of pattern %s' % (idx, len(pattern_counter.keys()), one_flattened_dep_rels))
dep_rels = one_flattened_dep_rels.split('-')
df = copy.deepcopy(original_df)
if pattern_type == 'ot': df['predicted_targets'] = df.parallel_apply(lambda x: pattern_handler.extract_targets(x['doc'], x['opinion_words'], dep_rels, dependency_handler), axis=1)
elif pattern_type == 'tt': df['predicted_targets'] = df.parallel_apply(lambda x: pattern_handler.extract_targets(x['doc'], x['targets'], dep_rels, dependency_handler), axis=1)
df['pattern'] = one_flattened_dep_rels
df['pattern_count'] = pattern_count
df.drop(['filename', 'doc'], axis=1, inplace=True)
df.to_csv(filepath, index = False, encoding='utf-8-sig')
print('Created %s' % filepath)
filepath = output_targets_concat_csv_filepath % (domain, k, pattern_type)
if os.path.exists(filepath): concat_df = pd.read_csv(filepath)
else:
print('Merging csv files in %s for [%s]..' % (output_targets_dir, domain))
concat_df = merge_dfs(glob.glob(os.path.join(output_targets_dir, '%s_k=%d_%s*.csv' % (domain, k, pattern_type))))
concat_df.to_csv(filepath, index = False, encoding='utf-8-sig')
print('Created %s' % filepath)
concat_df['targets'] = concat_df.apply(lambda x: ast.literal_eval(x['targets']), axis=1)
concat_df['predicted_targets'] = concat_df.apply(lambda x: ast.literal_eval(x['predicted_targets']), axis=1)
print('Evaluating rules for [%s k=%d]..' % (domain, k))
filepath = output_pattern_quality_estimation_csv_filepath % (domain, k)
if not os.path.exists(filepath):
f = open(filepath, 'w', encoding='utf-8-sig')
wr = csv.writer(f)
wr.writerow(['domain', 'pattern', 'count', 'precision_multiple', 'recall_multiple', 'f1_multiple', 'precision_distinct', 'recall_distinct', 'f1_distinct'])
for pattern in concat_df['pattern'].unique():
current_df = concat_df[concat_df['pattern']==pattern]
pre_mul, rec_mul, pre_dis, rec_dis = calculate_precision_recall(current_df)
wr.writerow([domain, pattern, '%d'%current_df['pattern_count'].iloc[0], '%.2f'%pre_mul, '%.2f'%rec_mul, '%.2f'%calculate_f1(pre_mul,rec_mul), '%.2f'%pre_dis, '%.2f'%rec_dis, '%.2f'%calculate_f1(pre_dis,rec_dis)])
f.close()
print('Created %s' % filepath)
pattern_evaluation_df = pd.read_csv(filepath)
print('Loaded %s' % filepath)
return concat_df, pattern_evaluation_df
def evaluate_rule_set(original_df, selected_pattern_list, pattern_handler, dependency_handler):
df = copy.deepcopy(original_df)
df['predicted_targets'] = df.apply(lambda x: list(), axis=1)
for one_flattened_dep_rels in selected_pattern_list:
dep_rels = one_flattened_dep_rels.split('-')
df['predicted_targets'] = df.apply(lambda x: pattern_handler.extract_targets(x['doc'], x['opinion_words'], dep_rels, dependency_handler, x['predicted_targets']), axis=1)
pre_mul, rec_mul, pre_dis, rec_dis = calculate_precision_recall(df)
f1_mul = calculate_f1(pre_mul,rec_mul)
f1_dis = calculate_f1(pre_dis,rec_dis)
return pre_mul, rec_mul, f1_mul, f1_dis
def evaluate_rule_set_f1(original_df, selected_pattern_list, pattern_handler, dependency_handler):
df = copy.deepcopy(original_df)
df['predicted_targets'] = df.apply(lambda x: list(), axis=1)
for one_flattened_dep_rels in selected_pattern_list:
dep_rels = one_flattened_dep_rels.split('-')
df['predicted_targets'] = df.apply(lambda x: pattern_handler.extract_targets(x['doc'], x['opinion_words'], dep_rels, dependency_handler, x['predicted_targets']), axis=1)
pre_mul, rec_mul, pre_dis, rec_dis = calculate_precision_recall(df)
f1_mul = calculate_f1(pre_mul,rec_mul)
return f1_mul
def evaluate_rule_set_pre(original_df, selected_pattern_list, pattern_handler, dependency_handler):
df = copy.deepcopy(original_df)
df['predicted_targets'] = df.apply(lambda x: list(), axis=1)
for one_flattened_dep_rels in selected_pattern_list:
dep_rels = one_flattened_dep_rels.split('-')
df['predicted_targets'] = df.apply(lambda x: pattern_handler.extract_targets(x['doc'], x['opinion_words'], dep_rels, dependency_handler, x['predicted_targets']), axis=1)
pre_mul, rec_mul, pre_dis, rec_dis = calculate_precision_recall(df)
f1_mul = calculate_f1(pre_mul,rec_mul)
return pre_mul
def pick_least_redundant_one_pattern(selected_pattern_list, subset_handler):
print('Picking out the least redundant one pattern against %s.. ' % str(selected_pattern_list))
x1 = subset_handler.evaluate_patterns_tp(selected_pattern_list)['tp'].values.reshape(-1,1)
redundancy_degree_score_df = pd.DataFrame([candidate_pattern for candidate_pattern in subset_handler.pattern_list if candidate_pattern not in selected_pattern_list], columns=['candidate_pattern'])
redundancy_degree_score_df['redundancy_score'] = redundancy_degree_score_df.parallel_apply(lambda row: mutual_info_classif(x1, subset_handler.evaluate_patterns_tp([*selected_pattern_list, row['candidate_pattern']])['tp'].values.reshape(-1,1), discrete_features=[0]), axis=1)
min_row = redundancy_degree_score_df.sort_values(by='redundancy_score').iloc[0]
min_redundant_pattern = min_row['candidate_pattern']
min_redundancy_degree_score = min_row['redundancy_score']
return min_redundant_pattern, min_redundancy_degree_score
def pick_one_pattern(selected_pattern_list, subset_handler, original_df, pattern_handler, dependency_handler, config_option):
print('Picking out the least redundant one pattern against %s.. ' % str(selected_pattern_list))
x1 = subset_handler.evaluate_patterns_tp(selected_pattern_list)['tp'].values.reshape(-1,1)
redundancy_degree_score_df = pd.DataFrame([candidate_pattern for candidate_pattern in subset_handler.pattern_list if candidate_pattern not in selected_pattern_list], columns=['candidate_pattern'])
redundancy_degree_score_df['redundancy_score'] = redundancy_degree_score_df.parallel_apply(lambda row: mutual_info_classif(x1, subset_handler.evaluate_patterns_tp([*selected_pattern_list, row['candidate_pattern']])['tp'].values.reshape(-1,1), discrete_features=[0]), axis=1)
if config_option == 'f1mi':
redundancy_degree_score_df['criteria'] = redundancy_degree_score_df.parallel_apply(lambda row: evaluate_rule_set_f1(original_df, [*selected_pattern_list, row['candidate_pattern']], pattern_handler, dependency_handler), axis=1)
elif config_option == 'premi':
redundancy_degree_score_df['criteria'] = redundancy_degree_score_df.parallel_apply(lambda row: evaluate_rule_set_pre(original_df, [*selected_pattern_list, row['candidate_pattern']], pattern_handler, dependency_handler), axis=1)
redundancy_degree_score_df['criteria/mi'] = redundancy_degree_score_df.progress_apply(lambda x: x['criteria']/x['redundancy_score'], axis=1)
min_row = redundancy_degree_score_df.sort_values(by='criteria/mi', ascending=False).iloc[0]
min_redundant_pattern = min_row['candidate_pattern']
min_redundancy_degree_score = min_row['redundancy_score']
return min_redundant_pattern, min_redundancy_degree_score
def pattern_subset_selection(domain, k, pattern_type, selected_pattern_list, original_df, subset_handler, pattern_handler, dependency_handler):
print('Processing subset selection for [%s k=%d]..' % (domain, k))
pkl_filepath = output_subset_pkl_filepath % (domain, k, pattern_type)
if False: print('pass') # disabled cache: if os.path.exists(pkl_filepath): best_subset = load_pkl(pkl_filepath)
else:
best_f1_mul_list, best_f1_dis_list, best_subset_list = [], [], []
best_f1_mul, best_f1_dis, best_subset = 0, 0, []
_, _, f1_mul, f1_dis = evaluate_rule_set(original_df, selected_pattern_list, pattern_handler, dependency_handler)
content = "Selected pattern list = %s \n\tF1 (multiple): %.4f\tF1 (distinct): %.4f" % (str(selected_pattern_list), f1_mul, f1_dis)
print("[%s]Selected pattern list = %s \n\tF1 (multiple): %.4f\tF1 (distinct): %.4f" % (domain, str(selected_pattern_list), f1_mul, f1_dis))
picked_cnt, num_decreased = 0, 0
while True:
best_f1_mul, best_f1_dis, best_subset = f1_mul, f1_dis, copy.deepcopy(selected_pattern_list)
best_f1_mul_list.append(best_f1_mul)
best_f1_dis_list.append(best_f1_dis)
best_subset_list.append(best_subset)
picked_cnt += 1
if config_option != '':
min_redundant_pattern, mi_score = pick_one_pattern(selected_pattern_list, subset_handler, original_df, pattern_handler, dependency_handler, config_option)
else:
min_redundant_pattern, mi_score = pick_least_redundant_one_pattern(selected_pattern_list, subset_handler)
content += "\nLeast redundant pattern = %s [Redundancy score (MI score) = %.4f]" % (min_redundant_pattern, mi_score)
print("[%s]Least redundant pattern = %s [Redundancy score (MI score) = %.4f]" % (domain, min_redundant_pattern, mi_score))
selected_pattern_list.append(min_redundant_pattern)
_, _, f1_mul, f1_dis = evaluate_rule_set(original_df, selected_pattern_list, pattern_handler, dependency_handler)
content += "\n\nSelected pattern list = %s \n\tF1 (multiple): %.4f\tF1 (distinct): %.4f" % (str(selected_pattern_list), f1_mul, f1_dis)
print("[%s]Selected pattern list = %s \n\tF1 (multiple): %.4f\tF1 (distinct): %.4f" % (domain, str(selected_pattern_list), f1_mul, f1_dis))
if len(selected_pattern_list) == len(subset_handler.pattern_list): break
if max_pick_count != 0 and picked_cnt == max_pick_count: break
if max_pick_count == 0 and f1_mul < best_f1_mul:
num_decreased += 1
if num_decreased > allow_f1_decrease_count:
break
if max_pick_count == 0 and f1_mul >= best_f1_mul:
num_decreased = 0
if allow_f1_decrease_count > 0:
idx = 1 + allow_f1_decrease_count
best_f1_mul = best_f1_mul_list[-idx]
best_f1_dis = best_f1_dis_list[-idx]
best_subset = best_subset_list[-idx]
else:
best_f1_mul = best_f1_mul_list[-1]
best_f1_dis = best_f1_dis_list[-1]
best_subset = best_subset_list[-1]
content += "\n\n<Best> Selected pattern list = %s \n\tF1 (multiple): %.4f\tF1 (distinct): %.4f" % (str(best_subset), best_f1_mul, best_f1_dis)
print("[%s]<Best> Selected pattern list = %s \n\tF1 (multiple): %.4f\tF1 (distinct): %.4f" % (domain, str(best_subset), best_f1_mul, best_f1_dis))
filepath = output_subset_selection_log_filepath % (domain, k, pattern_type)
text_file = open(filepath, "w", encoding='utf-8')
text_file.write(content)
text_file.close()
print('Created %s' % filepath)
save_pkl(best_subset, pkl_filepath)
return best_subset
def main():
kf, kfold_results = KFold(n_splits=10), defaultdict(lambda: [])
pattern_handler, dependency_handler = PatternHandler(), DependencyGraphHandler()
for domain in domains: # raw_df['domain'].unique()
output_raw_df_pkl_filepath = output_raw_df_pkl_filepath_ % domain
if os.path.exists(output_raw_df_pkl_filepath): raw_df = load_pkl(output_raw_df_pkl_filepath)
else:
raw_df_ = pd.read_json(data_filepath)
from node2vec import Node2Vec
import pandas as pd
import numpy as np
import networkx as nx
import pickle
import os
import argparse
from numpy import linalg as la
from sklearn.metrics.pairwise import cosine_similarity
from sklearn import model_selection as sk_ms
from sklearn.metrics import confusion_matrix
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
import random
import matplotlib as mpl
import matplotlib.pyplot as plt
class GraphImporter:
"""
Class responsible for importing or creating the relevant graph.
"""
def __init__(self, args):
self.data_name = args.data_name
@staticmethod
def import_imdb_multi_graph(weights):
"""
Build the our_imdb multigraph using the MoviesGraph helper class.
:param weights:
:return:
"""
from IMDb_data_preparation_E2V import MoviesGraph
weights_dict = {'movies_edges': weights[0], 'labels_edges': weights[1]}
dict_paths = {'cast': 'data_set/IMDb title_principals.csv', 'genre': 'data_set/IMDb movies.csv'}
imdb = MoviesGraph(dict_paths)
gnx = imdb.create_graph()
labels = imdb.labels2int(gnx)
knowledge_gnx, knowledge_data = imdb.create_knowledge_graph(labels)
multi_gnx = imdb.weighted_multi_graph(gnx, knowledge_gnx, labels, weights_dict)
return multi_gnx
@staticmethod
def import_imdb_weighted_graph(weights):
from IMDb_data_preparation_E2V import MoviesGraph
weights_dict = {'movies_edges': weights[0], 'labels_edges': weights[1]}
dict_paths = {'cast': 'data_set/IMDb title_principals.csv', 'genre': 'data_set/IMDb movies.csv'}
imdb = MoviesGraph(dict_paths)
gnx = imdb.create_graph()
labels = imdb.labels2int(gnx)
knowledge_gnx, knowledge_data = imdb.create_knowledge_graph(labels)
weighted_graph = imdb.weighted_graph(gnx, knowledge_gnx, labels, weights_dict)
return weighted_graph
def import_graph(self):
graph = nx.MultiGraph()
data_path = self.data_name + '.txt'
path = os.path.join(self.data_name, data_path)
with open(path, 'r') as f:
for line in f:
items = line.strip().split()
att1 = str(items[0][0])
att2 = str(items[1][0])
graph.add_node(items[0], key=att1)
graph.add_node(items[1], key=att2)
sort_att = np.array([att1, att2])
sort_att = sorted(sort_att)
graph.add_edge(items[0], items[1], key=str(sort_att[0]) + str(sort_att[1]))
return graph
class EmbeddingCreator(object):
def __init__(self, graph=None, dimension=None, args=None):
self.data_name = args.data_name
self.dim = dimension
self.graph = graph
def create_node2vec_embeddings(self):
# path1 = os.path.join(self.data_name, 'Node2Vec_embedding.pickle')
# path2 = os.path.join(self.data_name, 'Node2Vec_embedding.csv')
# if os.path.exists(path1):
# with open(path1, 'rb') as handle:
# dict_embeddings = pickle.load(handle)
# elif os.path.exists(path2):
# embedding_df = pd.read_csv(path2)
# dict_embeddings = embedding_df.to_dict(orient='list')
# with open(path2, 'wb') as handle:
# pickle.dump(dict_embeddings, handle, protocol=3)
# else:
# node2vec = Node2Vec(self.graph, dimensions=16, walk_length=30, num_walks=200, workers=1)
# model = node2vec.fit()
# nodes = list(self.graph.nodes())
# dict_embeddings = {}
# for i in range(len(nodes)):
# dict_embeddings.update({nodes[i]: np.asarray(model.wv.get_vector(nodes[i]))})
# with open(path1, 'wb') as handle:
# pickle.dump(dict_embeddings, handle, protocol=3)
node2vec = Node2Vec(self.graph, dimensions=self.dim, walk_length=30, num_walks=200, workers=1)
model = node2vec.fit()
nodes = list(self.graph.nodes())
dict_embeddings = {}
for i in range(len(nodes)):
dict_embeddings.update({nodes[i]: np.asarray(model.wv.get_vector(nodes[i]))})
return dict_embeddings
def create_event2vec_embeddings(self):
data_path = self.data_name + '_e2v_embeddings.txt'
path = os.path.join(self.data_name, data_path)
cond = 0
dict_embeddings = {}
with open(path, 'r') as f:
for line in f:
if cond == 1:
items = line.strip().split()
dict_embeddings[items[0]] = items[1:]
cond = 1
return dict_embeddings
def create_oger_embeddings(self):
from oger_embedding.for_nni import StaticEmbeddings
static_embeddings = StaticEmbeddings(name='pw', graph=self.graph, dim=self.dim)
dict_embeddings = static_embeddings.dict_embedding
return dict_embeddings
class TopKRanker(OneVsRestClassifier):
"""
Logistic regression wrapped in a one-vs-rest classifier, returning the top-k labels per sample.
"""
def predict_kfir(self, x, top_k_list):
assert x.shape[0] == len(top_k_list)
probs = super(TopKRanker, self).predict_proba(x)
prediction = np.zeros((x.shape[0], self.classes_.shape[0]))
for i, k in enumerate(top_k_list):
probs_ = probs[i, :]
labels = self.classes_[probs_.argsort()[-int(k):]].tolist()
for label in labels:
prediction[i, int(label)] = 1
return prediction, probs
class EdgesPreparation:
def __init__(self, graph, multi_graph, args):
self.args = args
self.multi_graph = multi_graph
self.graph = graph
self.label_edges = self.make_label_edges()
def make_label_edges(self):
"""
Make a list with all the edges of type "labels_edges", i.e. edges between a movie and its class.
:return: list with labels_edges
"""
data_path = self.args.data_name + '_true_edges.pickle'
nodes = list(self.multi_graph.nodes)
label_edges = []
for node in nodes:
info = self.multi_graph._adj[node]
neighs = list(info.keys())
for neigh in neighs:
if info[neigh][0]['key'] == 'labels_edges':
label_edges.append([node, neigh])
try:
with open(os.path.join(self.args.data_name, data_path), 'wb') as handle:
pickle.dump(label_edges, handle, protocol=3)
except:
pass
return label_edges
def label_edges_classes_ordered(self):
"""
Build a dict mapping each class to the labels_edges that belong to it. Every label_edge
belongs to exactly one class.
:return: a dict of classes and their labels_edges
"""
dict_class_label_edge = {}
for edge in self.label_edges:
if edge[0][0] == 'c':
label = edge[0]
else:
label = edge[1]
if dict_class_label_edge.get(label) is not None:
edges = dict_class_label_edge[label]
edges.append(edge)
dict_class_label_edge[label] = edges
else:
dict_class_label_edge.update({label: [edge]})
return dict_class_label_edge
def unseen_edges(self):
unseen_edges = []
dict_true_edges = self.label_edges_classes_ordered()
classes = list(dict_true_edges.keys())
for i, k in enumerate(sorted(dict_true_edges, key=lambda x: len(dict_true_edges[x]), reverse=True)):
classes[i] = k
unseen_classes = classes[int(0.8 * len(classes)):]
for c in unseen_classes:
unseen_edges.append(dict_true_edges[c])
return unseen_edges
def seen_graph(self):
unseen_edges = self.unseen_edges()
graph = self.graph
for edge in unseen_edges:
graph.remove_edge(edge[0][0], edge[0][1])
return graph
def make_false_label_edges(self, dict_class_label_edge):
"""
Make a dictionary of classes and false 'labels_edges' i.e. 'labels_edges' that do not exist.
The number of false 'labels_edges' for each class in the dictionary is false_per_true times the true
'labels_edges' of the class.
In addition, to keep the data balanced, the function samples the false 'labels_edges' randomly so that
the number of false 'labels_edges' corresponding to each class is similar.
The false 'labels_edges' serve as negative instances for the classifier.
:param dict_class_label_edge: a dict of classes and their true labels_edges.
:return: a dict of classes and their false labels_edges.
"""
data_path = self.args.data_name + '_false_edges_balanced_{}.pickle'.format(self.args.false_per_true)
dict_class_false_edges = {}
labels = list(dict_class_label_edge.keys())
false_labels = []
for label in labels:
for edge in dict_class_label_edge[label]:
if edge[0][0] == 'c':
label = edge[0]
movie = edge[1]
else:
label = edge[1]
movie = edge[0]
if len(false_labels) < self.args.false_per_true + 1:
false_labels = list(set(labels) - {label})  # exclude the current label itself
else:
false_labels = list(set(false_labels) - {label})  # exclude the current label itself
indexes = random.sample(range(1, len(false_labels)), self.args.false_per_true)
for i, index in enumerate(indexes):
if dict_class_false_edges.get(label) is None:
dict_class_false_edges[label] = [[movie, false_labels[index]]]
else:
edges = dict_class_false_edges[label]
edges.append([movie, false_labels[index]])
dict_class_false_edges[label] = edges
false_labels = list(np.delete(np.array(false_labels), indexes))
try:
with open(os.path.join(self.args.data_name, data_path), 'wb') as handle:
pickle.dump(dict_class_false_edges, handle, protocol=3)
except:
pass
return dict_class_false_edges
class Classifier:
def __init__(self, dict_true_edges, dict_false_edges, dict_projections, embedding, args):
self.args = args
self.embedding = embedding
self.dict_true_edges = dict_true_edges
self.dict_false_edges = dict_false_edges
self.norm = set(args.norm)
self.dict_projections = dict_projections
def edge_distance(self, edge):
"""
Calculate the distance of an edge. Take the vertices of the edge and calculate the distance between their
embeddings.
The distance can be computed with the L1 norm, the L2 norm, or cosine similarity.
:param edge: the edge whose distance we want to find.
:return: The distance
"""
embd1 = np.array(self.dict_projections[edge[0]]).astype(float)
embd2 = np.array(self.dict_projections[edge[1]]).astype(float)
if self.norm == set('L1 Norm'):
norm = la.norm(np.subtract(embd1, embd2), 1)
elif self.norm == set('L2 Norm'):
norm = la.norm(np.subtract(embd1, embd2), 2)  # order 2 for the L2 norm
elif self.norm == set('cosine'):
norm = cosine_similarity(embd1.reshape(1, -1), embd2.reshape(1, -1))[0]
else:
raise ValueError(f"Wrong name of norm, {self.norm}")
return norm
def calculate_classifier_value(self, true_edges, false_edges):
"""
Create x and y for Logistic Regression Classifier.
self.dict_projections: A dictionary of all nodes embeddings, where keys==nodes and values==embeddings
:param true_edges: A list of true edges.
:param false_edges: A list of false edges.
:return: x_true/x_false - The feature matrix for logistic regression classifier, of true/false edge.
The i'th row is the norm score calculated for each edge.
y_true_edge/y_false_edge - The edges labels, [1,0] for true/ [0,1] for false.
The corresponding edge is also concatenated to the label.
"""
x_true, x_false = np.zeros(shape=(len(true_edges), 1)), np.zeros(shape=(len(false_edges), 1))
y_true_edge, y_false_edge = np.zeros(shape=(len(true_edges), 4)).astype(int).astype(str), \
np.zeros(shape=(len(false_edges), 4)).astype(int).astype(str)
for i, edge in enumerate(true_edges):
norm = self.edge_distance(edge)
x_true[i, 0] = norm
y_true_edge[i, 2] = edge[0]
y_true_edge[i, 3] = edge[1]
y_true_edge[i, 0] = str(1)
for i, edge in enumerate(false_edges):
norm = self.edge_distance(edge)
x_false[i, 0] = norm
y_false_edge[i, 2] = edge[0]
y_false_edge[i, 3] = edge[1]
y_false_edge[i, 1] = str(1)
return x_true, x_false, y_true_edge, y_false_edge
@staticmethod
def train_edge_classification(x_train, y_train):
"""
Train the classifier with the training set.
:param x_train: The edge features (norm scores) of the training set.
:param y_train: The edge labels of the training set ([1,0] for true, [0,1] for false).
:return: The classifier
"""
classif2 = TopKRanker(LogisticRegression())
classif2.fit(x_train, y_train)
return classif2
@staticmethod
def split_data(x_true, x_false, y_true_edge, y_false_edge, ratio):
"""
Split the data into train and test sets for both the true edges and the false ones.
:param ratio: determines the train size.
:return: The split data
"""
x_train_true, x_test_true, y_train_true_edge, y_test_true_edge = sk_ms.train_test_split(
x_true, y_true_edge, test_size=1 - ratio)
x_train_false, x_test_false, y_train_false_edge, y_test_false_edge = sk_ms.train_test_split(
x_false, y_false_edge, test_size=1 - ratio)
true_edges_test_source = y_test_true_edge.T[2].reshape(-1, 1)
true_edges_test_target = y_test_true_edge.T[3].reshape(-1, 1)
x_train, x_test, y_train_edge, y_test_edge = np.concatenate((x_train_true, x_train_false), axis=0), \
np.concatenate((x_test_true, x_test_false), axis=0), \
np.concatenate((y_train_true_edge, y_train_false_edge), axis=0), \
np.concatenate((y_test_true_edge, y_test_false_edge), axis=0)
y_train = np.array([y_train_edge.T[0].reshape(-1, 1), y_train_edge.T[1].reshape(-1, 1)]).T.reshape(-1,
2).astype(
int)
true_edges_test = np.array([true_edges_test_source, true_edges_test_target]).T[0]
return x_train, y_train, true_edges_test
def train(self):
"""
Prepare the training data, train the classifier, and split the test data by class.
:return: The classifier and dict_class_movie_test
"""
path1 = os.path.join(self.args.data_name, f'train/classifier23_{self.embedding}_{self.args.norm}.pkl')
path2 = os.path.join(self.args.data_name, f'train/dict_{self.embedding}_{self.args.norm}.pkl')
classes = list(self.dict_true_edges.keys())
for i, k in enumerate(sorted(self.dict_true_edges, key=lambda x: len(self.dict_true_edges[x]), reverse=True)):
classes[i] = k
dict_class_movie_test = {}
x_train_all, y_train_all = np.array([]), np.array([])
seen_classes = classes[:int(0.8 * len(classes))]
unseen_classes = classes[int(0.8 * len(classes)):]
classif2 = None
for j in range(len(self.args.ratio)):
for c in seen_classes:
dict_movie_edge = {}
x_true, x_false, y_true_edge, y_false_edge = \
self.calculate_classifier_value(self.dict_true_edges[c], self.dict_false_edges[c])
x_train, y_train, true_edges_test = self.split_data(x_true, x_false, y_true_edge, y_false_edge,
self.args.ratio[j])
for edge in true_edges_test:
if edge[0][0] == 't':
movie = edge[0]
else:
movie = edge[1]
dict_movie_edge[movie] = edge
dict_class_movie_test[c] = dict_movie_edge.copy()
if len(x_train_all) > 0:
x_train_all = np.concatenate((x_train_all, x_train), axis=0)
y_train_all = np.concatenate((y_train_all, y_train), axis=0)
else:
x_train_all = x_train
y_train_all = y_train
for c in unseen_classes:
dict_movie_edge = {}
x_true, x_false, y_true_edge, y_false_edge = \
self.calculate_classifier_value(self.dict_true_edges[c], self.dict_false_edges[c])
_, _, true_edges_test = self.split_data(x_true, x_false, y_true_edge, y_false_edge, ratio=0)
for edge in true_edges_test:
if edge[0][0] == 't':
movie = edge[0]
else:
movie = edge[1]
dict_movie_edge[movie] = edge
dict_class_movie_test[c] = dict_movie_edge.copy()
shuff = np.c_[x_train_all.reshape(len(x_train_all), -1), y_train_all.reshape(len(y_train_all), -1)]
np.random.shuffle(shuff)
x_train_all = shuff.T[0].reshape(-1, 1)
y_train_all = np.array([shuff.T[1].reshape(-1, 1), shuff.T[2].reshape(-1, 1)]).T.reshape(-1, 2).astype(
int)
classif2 = self.train_edge_classification(np.array(x_train_all), np.array(y_train_all))
with open(path1, 'wb') as fid:
pickle.dump(classif2, fid)
with open(path2, 'wb') as fid:
pickle.dump(dict_class_movie_test, fid)
return classif2, dict_class_movie_test
@staticmethod
def predict_edge_classification(classif2, x_test):
"""
Predict labels and probabilities for the test edges with the trained classifier.
:param classif2: the trained TopKRanker classifier.
:param x_test: the edge features (norm scores) of the test set.
:return: the predictions and the predicted probabilities.
"""
top_k_list = list(np.ones(len(x_test)).astype(int))
prediction, probs = classif2.predict_kfir(x_test, top_k_list)
return prediction, probs
def evaluate(self, classif2, dict_class_movie_test):
# evaluate
classes = list(self.dict_true_edges.keys())
pred_true = []
pred = []
for i, k in enumerate(sorted(self.dict_true_edges, key=lambda x: len(self.dict_true_edges[x]), reverse=True)):
classes[i] = k
num_classes = len(classes)
dict_measures = {'acc': {}, 'precision': {}}
dict_class_measures = {}
for c in classes:
class_movies = list(dict_class_movie_test[c].keys())
count = 0
for m in class_movies:
edges = np.array([np.repeat(m, num_classes), classes]).T
class_test = np.zeros(shape=(len(edges), 1))
for i, edge in enumerate(edges):
norm = self.edge_distance(edge)
class_test[i, 0] = norm
_, probs = self.predict_edge_classification(classif2, class_test)
pred_index = np.argmax(probs.T[0])
prediction = edges[pred_index]
real_edge = list(dict_class_movie_test[c][m])
pred_true.append(c)
if prediction[0][0] == 'c':
pred.append(prediction[0])
else:
pred.append(prediction[1])
if prediction[0] == real_edge[0]:
if prediction[1] == real_edge[1]:
count += 1
elif prediction[1] == real_edge[0]:
if prediction[0] == real_edge[1]:
count += 1
accuracy = count / len(class_movies)
dict_measures['acc'] = accuracy
dict_class_measures[c] = dict_measures.copy()
with open(os.path.join(self.args.data_name, f'dict_class_measures_{self.embedding}_{self.args.norm}.pkl'),
'wb') as handle:
pickle.dump(dict_class_measures, handle, protocol=3)
# TODO dict class measures for every ratio
return dict_class_measures, pred, pred_true
def confusion_matrix_maker(self, dict_class_measures, pred, pred_true):
conf_matrix = confusion_matrix(pred_true, pred, labels=list(dict_class_measures.keys()))
seen_true_count = 0
seen_count = 0
unseen_true_count = 0
unseen_count = 0
seen_number = int(0.8 * len(conf_matrix))
for i in range(len(conf_matrix))[:seen_number]:
seen_true_count += conf_matrix[i][i]
for j in range(len(conf_matrix)):
seen_count += conf_matrix[i][j]
for i in range(len(conf_matrix))[seen_number:]:
unseen_true_count += conf_matrix[i][i]
for j in range(len(conf_matrix)):
unseen_count += conf_matrix[i][j]
accuracy = (seen_true_count + unseen_true_count) / (seen_count + unseen_count)
seen_accuracy = seen_true_count / seen_count
unseen_accuracy = unseen_true_count / unseen_count
print(f'acc_all: {accuracy}')
print(f'acc_all_seen: {seen_accuracy}')
print(f'acc_all_unseen: {unseen_accuracy}')
plt.figure(1)
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['axes.labelsize'] = 16
plt.title('Confusion Matrix, ZSL OUR_IMDB')
plt.xlabel("True Labels")
plt.ylabel("Predicted Labels")
plt.imshow(conf_matrix, cmap='gist_gray', vmin=0, vmax=2)
plt.colorbar()
plt.savefig(f'{self.args.data_name}/plots/confusion_matrix_{self.embedding}_{self.args.norm}')
return accuracy, seen_accuracy, unseen_accuracy
def obj_func(weights):
"""
Main Function for link prediction task.
:return:
"""
np.random.seed(0)
print(weights)
parser = argparse.ArgumentParser()
parser.add_argument('--data_name', default='our_imdb')
parser.add_argument('--norm', default='cosine') # cosine / L2 Norm / L1 Norm
parser.add_argument('--embedding', default='Node2Vec') # Node2Vec / Event2Vec / OGRE
parser.add_argument('--false_per_true', default=10)
parser.add_argument('--ratio', default=[0.8])
args = parser.parse_args()
# ratio_arr = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
graph_maker = GraphImporter(args)
multi_graph = graph_maker.import_imdb_multi_graph(weights)
weighted_graph = graph_maker.import_imdb_weighted_graph(weights)
edges_preparation = EdgesPreparation(weighted_graph, multi_graph, args)
dict_true_edges = edges_preparation.label_edges_classes_ordered()
dict_false_edges = edges_preparation.make_false_label_edges(dict_true_edges)
graph = edges_preparation.seen_graph()
embeddings_maker = EmbeddingCreator(graph=graph, args=args)  # pass args by keyword so it is not consumed as the dimension
if args.embedding == 'Node2Vec':
dict_embeddings = embeddings_maker.create_node2vec_embeddings()
elif args.embedding == 'Event2Vec':
dict_embeddings = embeddings_maker.create_event2vec_embeddings()
elif args.embedding == 'Oger':
dict_embeddings = embeddings_maker.create_oger_embeddings()
else:
raise ValueError(f"Wrong embedding name, {args.embedding}")
classifier = Classifier(dict_true_edges, dict_false_edges, dict_embeddings, args.embedding, args)
classif, dict_class_movie_test = classifier.train()
dict_class_measures_node2vec, pred, pred_true = classifier.evaluate(classif, dict_class_movie_test)
accuracy, seen_accuracy, unseen_accuracy = classifier.confusion_matrix_maker(
dict_class_measures_node2vec, pred, pred_true)
try:
values = pd.read_csv('our_imdb/train/optimaize_values_Node2Vec_l2.csv')
import click
import logging
import os
import pandas as pd
from tqdm import tqdm
from rxnmapper import RXNMapper
logger = logging.getLogger(__name__)
@click.command()
@click.option(
'--file_path',
'-f',
help='Input file path to csv, tsv or json with "rxn" column'
)
@click.option('--output_path', '-o', help='Output file path')
@click.option('--batch_size', '-bs', default=1, help='Batch size')
@click.option(
'--canonicalize/--no_canonicalize',
default=True,
help='Canonicalize inputs (default: True)'
)
@click.option(
'--detailed',
'-d',
default=False,
is_flag=True,
help='Return detailed output'
)
def main(
file_path: str, output_path: str, batch_size: int, canonicalize: bool,
detailed: bool
) -> None:
if file_path.endswith('.json'):
df = pd.read_json(file_path)
elif file_path.endswith('.tsv'):
df = pd.read_csv(file_path, sep='\t')
elif file_path.endswith('.csv'):
df = pd.read_csv(file_path)
else:
raise ValueError('Unrecognized file type')
df.reset_index(inplace=True)
rxn_mapper = RXNMapper()
results = []
rxns = []
for i, row in tqdm(df.iterrows(), total=len(df)):
rxns.append(row['rxn'])
if (i + 1) % batch_size == 0:
results += rxn_mapper.get_attention_guided_atom_maps(
rxns, canonicalize_rxns=canonicalize, detailed_output=detailed
)
rxns = []
if rxns:
results += rxn_mapper.get_attention_guided_atom_maps(
rxns, canonicalize_rxns=canonicalize, detailed_output=detailed
)
results_df = pd.DataFrame(results)
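# Hedged usage note (not part of the original script): assuming this module is
# saved as map_rxns.py (a hypothetical name), the click CLI above could be
# invoked roughly like:
#   python map_rxns.py -f reactions.csv -o mapped.csv -bs 8 --detailed
# where reactions.csv is a placeholder file containing an "rxn" column.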
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import os
import re
import string
from math import log
from pathlib import Path
from typing import List
from transformers import pipeline
import nltk
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from lib.config import DataConfig
from vnnlp.vietnamnlp import vn_tokenize
def prep_data(remake: bool) -> DataConfig:
curr_directory = str(Path.cwd())
data_directory = curr_directory + "/data"
stock_directory = Path(data_directory + "/stock-data")
try:
for f in stock_directory.rglob('*-data.csv'):
# TODO: Support multiples csv files
stock_data = f
print('Stock data used:', f)
except:
print('There is no .csv stock data')
try:
for n in stock_directory.rglob('*-news.json'):
news_data = n
print('News data used:', n)
except:
print('There is no .json news')
try:
for n in stock_directory.rglob('*-eps.csv'):
eps_data = n
print('Eps data used:', n)
except:
print('There is no .csv eps data')
train_path = data_directory + '/train' + '/train_data.npy'
test_path = data_directory + '/test' + '/test_data.npy'
val_path = data_directory + '/val' + '/validation_data.npy'
feature = ['Close', 'sentiment', 'vn_embedding', 'Report_EPS', 'Datetime', 'Label']
start_date = '2009-12-31'
end_date = '2020-04-21'
train_index = 1500
test_index = 2500
def _sentiment_transformer(text_df: pd.DataFrame) -> List[int]:
print('Starting sentiment-analysis...')
nlp = pipeline('sentiment-analysis')
length = len(text_df['en_text'])
dummy = []
for i in range(length):
if text_df['en_text'][i] is not None:
ret = nlp(text_df['en_text'][i])
if ret[0]['label'] == 'NEGATIVE':
dummy.append(-1)
elif ret[0]['label'] == 'POSITIVE':
dummy.append(1)
else:
dummy.append(0)
else:
dummy.append(0)
print('Completing sentiment-analysis...')
return dummy
def _create_label(csv_location: str) -> None:
df = pd.read_csv(csv_location)
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split:
(date_received)
dataset3: 20160701~20160731 (113640),features3 from 20160315~20160630 (off_test)
dataset2: 20160515~20160615 (258446),features2 from 20160201~20160514
dataset1: 20160414~20160514 (138303),features1 from 20160101~20160413
1.merchant related:
sales_use_coupon. total_coupon
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
total_sales. coupon_rate = sales_use_coupon/total_sales.
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
3.user related:
distance.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
avg_diff_date_datereceived. min_diff_date_datereceived. max_diff_date_datereceived.
count_merchant.
4.user_merchant:
times_user_buy_merchant_before.
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#1754884 record,1053282 with coupon_id,9738 coupon. date_received:20160101~20160615,date:20160101~20160630, 539438 users, 8415 merchants
off_train = pd.read_csv('data/ccf_offline_stage1_train.csv',header=None)
off_train.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']
#2050 coupon_id. date_received:20160701~20160731, 76309 users(76307 in trainset, 35965 in online_trainset), 1559 merchants(1558 in trainset)
off_test = pd.read_csv('data/ccf_offline_stage1_test_revised.csv',header=None)
off_test.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received']
#11429826 record(872357 with coupon_id),762858 user(267448 in off_train)
on_train = pd.read_csv('data/ccf_online_stage1_train.csv',header=None)
on_train.columns = ['user_id','merchant_id','action','coupon_id','discount_rate','date_received','date']
dataset3 = off_test
feature3 = off_train[((off_train.date>='20160315')&(off_train.date<='20160630'))|((off_train.date=='null')&(off_train.date_received>='20160315')&(off_train.date_received<='20160630'))]
dataset2 = off_train[(off_train.date_received>='20160515')&(off_train.date_received<='20160615')]
feature2 = off_train[(off_train.date>='20160201')&(off_train.date<='20160514')|((off_train.date=='null')&(off_train.date_received>='20160201')&(off_train.date_received<='20160514'))]
dataset1 = off_train[(off_train.date_received>='20160414')&(off_train.date_received<='20160514')]
feature1 = off_train[(off_train.date>='20160101')&(off_train.date<='20160413')|((off_train.date=='null')&(off_train.date_received>='20160101')&(off_train.date_received<='20160413'))]
############# other feature ##################3
"""
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#for dataset3
t = dataset3[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset3[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset3[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset3[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset3[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset3[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset3[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset3[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature3 = pd.merge(t1,t,on='user_id')
other_feature3 = pd.merge(other_feature3,t3,on=['user_id','coupon_id'])
other_feature3 = pd.merge(other_feature3,t4,on=['user_id','date_received'])
other_feature3 = pd.merge(other_feature3,t5,on=['user_id','coupon_id','date_received'])
other_feature3 = pd.merge(other_feature3,t7,on=['user_id','coupon_id','date_received'])
other_feature3.to_csv('data/other_feature3.csv',index=None)
print(other_feature3.shape)
#for dataset2
t = dataset2[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset2[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset2[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset2[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset2[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset2[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset2[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset2[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature2 = pd.merge(t1,t,on='user_id')
other_feature2 = pd.merge(other_feature2,t3,on=['user_id','coupon_id'])
other_feature2 = pd.merge(other_feature2,t4,on=['user_id','date_received'])
other_feature2 = pd.merge(other_feature2,t5,on=['user_id','coupon_id','date_received'])
other_feature2 = pd.merge(other_feature2,t7,on=['user_id','coupon_id','date_received'])
other_feature2.to_csv('data/other_feature2.csv',index=None)
print(other_feature2.shape)
#for dataset1
t = dataset1[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset1[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset1[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset1[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received.astype('int')
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received.astype('int') - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset1[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset1[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset1[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset1[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature1 = pd.merge(t1,t,on='user_id')
other_feature1 = pd.merge(other_feature1,t3,on=['user_id','coupon_id'])
other_feature1 = pd.merge(other_feature1,t4,on=['user_id','date_received'])
other_feature1 = pd.merge(other_feature1,t5,on=['user_id','coupon_id','date_received'])
other_feature1 = pd.merge(other_feature1,t7,on=['user_id','coupon_id','date_received'])
other_feature1.to_csv('data/other_feature1.csv',index=None)
print(other_feature1.shape)
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 10:47:30 2018
@author: SilverDoe
"""
#============ Selecting a column ==============================================
import pandas as pd
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
print(df['one'])
#=========== Adding a column ==================================================
import pandas as pd
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
# Adding a new column to an existing DataFrame object with column label by passing new series
print ("Adding a new column by passing as Series:")
df['three']=pd.Series([10,20,30],index=['a','b','c'])
print(df)
print ("Adding a new column using the existing columns in DataFrame:")
df['four']=df['one']+df['three']
# adding multiple columns using the new assign function
df = df.assign(five=(df['four']+df['three']),six=(df['one']+df['four']))
print(df)
#=========== Deleting a column ================================================
# Using the previous DataFrame, we will delete a column
# using del function
import pandas as pd
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']),
'three' : pd.Series([10,20,30], index=['a','b','c'])}
df = pd.DataFrame(d)
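# A minimal completion of the deletion step described above; the original
# snippet is cut off at this point, so the exact statements are assumed.
print("Deleting the 'one' column using the del function:")
del df['one']
print(df)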
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from itertools import combinations
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from scipy.stats import spearmanr
from skbio.stats.distance import DistanceMatrix
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def bioenv(distance_matrix, data_frame, columns=None):
"""Find subset of variables maximally correlated with distances.
Finds subsets of variables whose Euclidean distances (after scaling the
variables; see Notes section below for details) are maximally
rank-correlated with the distance matrix. For example, the distance matrix
might contain distances between communities, and the variables might be
numeric environmental variables (e.g., pH). Correlation between the
community distance matrix and Euclidean environmental distance matrix is
computed using Spearman's rank correlation coefficient (:math:`\\rho`).
Subsets of environmental variables range in size from 1 to the total number
of variables (inclusive). For example, if there are 3 variables, the "best"
variable subsets will be computed for subset sizes 1, 2, and 3.
The "best" subset is chosen by computing the correlation between the
community distance matrix and all possible Euclidean environmental distance
matrices at the given subset size. The combination of environmental
variables with maximum correlation is chosen as the "best" subset.
Parameters
----------
distance_matrix : DistanceMatrix
Distance matrix containing distances between objects (e.g., distances
between samples of microbial communities).
data_frame : pandas.DataFrame
Contains columns of variables (e.g., numeric environmental variables
such as pH) associated with the objects in `distance_matrix`. Must be
indexed by the IDs in `distance_matrix` (i.e., the row labels must be
distance matrix IDs), but the order of IDs between `distance_matrix`
and `data_frame` need not be the same. All IDs in the distance matrix
must be present in `data_frame`. Extra IDs in `data_frame` are allowed
(they are ignored in the calculations).
columns : iterable of strs, optional
Column names in `data_frame` to include as variables in the
calculations. If not provided, defaults to all columns in `data_frame`.
The values in each column must be numeric or convertible to a numeric
type.
Returns
-------
pandas.DataFrame
Data frame containing the "best" subset of variables at each subset
size, as well as the correlation coefficient of each.
Raises
------
TypeError
If invalid input types are provided, or if one or more specified
columns in `data_frame` are not numeric.
ValueError
If column name(s) or `distance_matrix` IDs cannot be found in
`data_frame`, if there is missing data (``NaN``) in the environmental
variables, or if the environmental variables cannot be scaled (e.g.,
due to zero variance).
See Also
--------
scipy.stats.spearmanr
Notes
-----
See [1]_ for the original method reference (originally called BIO-ENV).
The general algorithm and interface are similar to ``vegan::bioenv``,
available in R's vegan package [2]_. This method can also be found in
PRIMER-E [3]_ (originally called BIO-ENV, but is now called BEST).
.. warning:: This method can take a *long* time to run if a large number of
variables are specified, as all possible subsets are evaluated at each
subset size.
The variables are scaled before computing the Euclidean distance: each
column is centered and then scaled by its standard deviation.
References
----------
.. [1] Clarke, K. R. & Ainsworth, M. 1993. "A method of linking multivariate
community structure to environmental variables". Marine Ecology Progress
Series, 92, 205-219.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
.. [3] http://www.primer-e.com/primer.htm
Examples
--------
Import the functionality we'll use in the following examples:
>>> import pandas as pd
>>> from skbio import DistanceMatrix
>>> from skbio.stats.distance import bioenv
Load a 4x4 community distance matrix:
>>> dm = DistanceMatrix([[0.0, 0.5, 0.25, 0.75],
... [0.5, 0.0, 0.1, 0.42],
... [0.25, 0.1, 0.0, 0.33],
... [0.75, 0.42, 0.33, 0.0]],
... ['A', 'B', 'C', 'D'])
Load a ``pandas.DataFrame`` with two environmental variables, pH and
elevation:
>>> df = pd.DataFrame([[7.0, 400],
... [8.0, 530],
... [7.5, 450],
... [8.5, 810]],
... index=['A','B','C','D'],
... columns=['pH', 'Elevation'])
Note that the data frame is indexed with the same IDs (``'A'``, ``'B'``,
``'C'``, and ``'D'``) that are in the distance matrix. This is necessary in
order to link the environmental variables (metadata) to each of the objects
in the distance matrix. In this example, the IDs appear in the same order
in both the distance matrix and data frame, but this is not necessary.
Find the best subsets of environmental variables that are correlated with
community distances:
>>> bioenv(dm, df) # doctest: +NORMALIZE_WHITESPACE
size correlation
vars
pH 1 0.771517
pH, Elevation 2 0.714286
We see that in this simple example, pH alone is maximally rank-correlated
with the community distances (:math:`\\rho=0.771517`).
"""
if not isinstance(distance_matrix, DistanceMatrix):
raise TypeError("Must provide a DistanceMatrix as input.")
if not isinstance(data_frame, pd.DataFrame):
raise TypeError("Must provide a pandas.DataFrame as input.")
if columns is None:
columns = data_frame.columns.values.tolist()
if len(set(columns)) != len(columns):
raise ValueError("Duplicate column names are not supported.")
if len(columns) < 1:
raise ValueError("Must provide at least one column.")
for column in columns:
if column not in data_frame:
raise ValueError("Column '%s' not in data frame." % column)
# Subset and order the vars data frame to match the IDs in the distance
# matrix, only keeping the specified columns.
vars_df = data_frame.loc[distance_matrix.ids, columns]
if vars_df.isnull().any().any():
raise ValueError("One or more IDs in the distance matrix are not "
"in the data frame, or there is missing data in the "
"data frame.")
try:
vars_df = vars_df.astype(float)
except ValueError:
raise TypeError("All specified columns in the data frame must be "
"numeric.")
# Scale the vars and extract the underlying numpy array from the data
# frame. We mainly do this for performance as we'll be taking subsets of
# columns within a tight loop and using a numpy array ends up being ~2x
# faster.
vars_array = _scale(vars_df).values
dm_flat = distance_matrix.condensed_form()
num_vars = len(columns)
var_idxs = np.arange(num_vars)
# For each subset size, store the best combination of variables:
# (string identifying best vars, subset size, rho)
max_rhos = np.empty(num_vars, dtype=[('vars', object),
('size', int),
('correlation', float)])
for subset_size in range(1, num_vars + 1):
max_rho = None
for subset_idxs in combinations(var_idxs, subset_size):
# Compute Euclidean distances using the current subset of
# variables. pdist returns the distances in condensed form.
vars_dm_flat = pdist(vars_array[:, subset_idxs],
metric='euclidean')
rho = spearmanr(dm_flat, vars_dm_flat)[0]
# If there are ties for the best rho at a given subset size, choose
# the first one in order to match vegan::bioenv's behavior.
if max_rho is None or rho > max_rho[0]:
max_rho = (rho, subset_idxs)
vars_label = ', '.join([columns[i] for i in max_rho[1]])
max_rhos[subset_size - 1] = (vars_label, subset_size, max_rho[0])
return | pd.DataFrame.from_records(max_rhos, index='vars') | pandas.DataFrame.from_records |
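# The _scale helper used above is not included in this excerpt; a simplified,
# assumed equivalent of the scaling described in the Notes (centre each column,
# divide by its standard deviation, reject zero-variance columns) is:
def _scale_sketch(df):
    scaled = df.copy()
    scaled -= scaled.mean()
    std = scaled.std()
    if (std == 0).any():
        raise ValueError("Column(s) with zero variance cannot be scaled.")
    return scaled / std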
# Link between theoretic network graph and trajectories
# Map-matching the trajectories to the underlying theoretical network
# Using Leuven Map-matching algorithm
# Start and End node of matched edge in dataframe of trajectories --> link between theoretical network and measured data
from pneumapackage.settings import bb_athens
from pneumapackage.__init__ import path_data, path_results, write_pickle, read_pickle
from pneumapackage.settings import *
import pneumapackage.compassbearing as cpb
import osmnx as ox
import matplotlib.pyplot as plt
import pandas as pd
import geopandas as gpd
import time
import numpy as np
import leuvenmapmatching as lm
from leuvenmapmatching.map.inmem import InMemMap
from leuvenmapmatching.matcher.distance import *
from leuvenmapmatching import visualization as mmviz
import leuvenmapmatching.util.dist_latlon as distlatlon
import leuvenmapmatching.util.dist_euclidean as distxy
import geopy.distance as geodist
from tqdm import tqdm
from tqdm.contrib import tenumerate
from shapely.geometry import Point, LineString
from pyproj import Proj, transform
# import similaritymeasures
from collections import Counter
import rtree
import sys
logger = False
if logger:
logger = lm.logger
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
class MapMatching:
# max initial distance is best chosen as the maximum length of an edge in the network graph
# max(network_edges['length'])
def __init__(self, list_traj, network_obj, max_init, max_d, match_latlon=True):
self.list_traj = list_traj
self.max_init = max_init
self.max_d = max_d
self.network_edges = network_obj.network_edges
self.network_nodes = network_obj.network_nodes
self.graph = network_obj.graph_latlon
self.match_latlon = match_latlon
self.map = self.make_map()
# Create in memory graph for leuvenmapmatching from osmnx graph
# Compatible path type for map matching
def make_map(self, index_edges=True, rtree=True):
map_con = InMemMap("athensmap", use_latlon=self.match_latlon, index_edges=index_edges,
use_rtree=rtree) # Deprecated crs style in algorithm
if self.match_latlon:
for nid, rown in self.network_nodes.iterrows():
map_con.add_node(int(rown.n1), (rown.lat1, rown.lon1))
for eid, rowe in self.network_edges.iterrows():
map_con.add_edge(rowe.n1, rowe.n2)
else:
for nid, rown in self.network_nodes.iterrows():
map_con.add_node(int(rown.n1), (rown.y1, rown.x1))
for eid, rowe in self.network_edges.iterrows():
map_con.add_edge(rowe.n1, rowe.n2)
return map_con
def reset_map(self):
self.map = self.make_map()
def match_fixed_distance(self, list_index=None, logger=False, **kwargs):
if logger:
logger = lm.logger
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
tic = time.time()
traj_mov_match = []
special_cases = []
point_traj = self.list_traj
if list_index is not None:
point_traj = [j for i, j in enumerate(point_traj) if i in list_index]
for i, j in tenumerate(point_traj):
while True:
try:
traj = map_matching(j, self.network_edges, self.map, self.max_init, self.max_d,
latlon=self.match_latlon, **kwargs)
traj = traj.merge(self.network_edges[['_id', 'n1', 'n2']], how='left', on=['n1', 'n2'])
traj_mov_match.append(traj)
break
except Exception:
special_cases.append(j)
break
toc = time.time()
print(f'{int(divmod(toc - tic, 60)[0])} min {int(divmod(toc - tic, 60)[1])} sec')
return traj_mov_match, special_cases
def match_variable_distance(self, list_index=None, logger=False, **kwargs):
if logger:
logger = lm.logger
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
tic = time.time()
traj_mov_match = []
fails = []
point_traj = self.list_traj
if list_index is not None:
point_traj = [j for i, j in enumerate(point_traj) if i in list_index]
for j in tqdm(point_traj):
dist_init = self.max_init
dist = self.max_d
fail = 0
while True:
try:
traj = map_matching(j, self.map, dist_init, dist, latlon=self.match_latlon, **kwargs)
traj = traj.merge(self.network_edges[['_id', 'n1', 'n2']], how='left', on=['n1', 'n2'])
traj_mov_match.append(traj)
break
except Exception:
if fail < 3:
# print('Set distance higher:')
dist += 5
fail = fail + 1
# print(dist)
# print('Number of fails: ' + str(fail))
elif 2 < fail <= 10:
dist += 10
fail = fail + 1
# print('Set distance higher:')
# print(dist)
# print('Number of fails: ' + str(fail))
elif fail > 10:
dist += 10
dist_init += 50
fail += 1
# print('Still at list ' + str(i))
fails.append(fail)
toc = time.time()
print(f'{int(divmod(toc - tic, 60)[0])} min {int(divmod(toc - tic, 60)[1])} sec')
return traj_mov_match
class TransformTrajectories:
def __init__(self, tr_match_all, network_obj):
tic = time.time()
# tr_match_all is a concatenated dataframe
# set multiindex with track_id and row number
tr_match_all.reset_index(drop=False, inplace=True)
tr_match_all = tr_match_all.rename(columns={'index': 'rid'})
tr_match_all.set_index(['track_id', 'rid'], inplace=True)
idx_counts = tr_match_all.index.get_level_values(0).value_counts()
sp = idx_counts[idx_counts == 1]
mp = idx_counts[idx_counts != 1]
self.tracks = tr_match_all
self.tracks_point = tr_match_all[~tr_match_all._id.isna()].loc[mp.index].sort_index()
self.tracks_single = tr_match_all.loc[sp.index].sort_index()
self.tracks_nan = tr_match_all[tr_match_all._id.isna()]
self.gdf_point = []
self.gdf_line = []
self.column_names = []
self.network = network_obj.network_edges
self.edges_counts = pd.DataFrame()
self.tracks_line = self.wrong_match()
toc = time.time()
print(toc - tic)
def wrong_match(self):
tic = time.time()
gdf_netw = self.network
ldf_all = self.tracks_point.copy()
# Do operations on total dataframe
ldf_all = ldf_all.join(gdf_netw['bearing'], how='left', rsuffix='_edge', on='_id')
diff = ldf_all[['bearing', 'bearing_edge']].values
bearing_diff = [round(min(abs(diff[a][0] - diff[a][1]), 360 - abs(diff[a][0] - diff[a][1])), 1)
for a in range(0, len(ldf_all))]
ldf_all['wrong_match'] = bearing_diff
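# e.g. bearings of 350 and 10 degrees differ by min(340, 20) = 20 degrees, so a
# large 'wrong_match' value flags a point matched to an edge pointing roughly
# the opposite way.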
u_edge, v_edge, w_1, w_2 = [], [], [], []
for j in tqdm(set(ldf_all.index.get_level_values(0))):
df = ldf_all.loc[(j,), ['_id', 'wrong_match']].copy()
# point dataset with nodes of matched edge, this adds column to all original dataframes (chained assignment)
# making line dataset --> always start and end point --> last point has no successive point --> -1
u_edge.extend(df['_id'].values[:-1])
v_edge.extend(df['_id'].values[1:])
w_1.extend(df['wrong_match'].values[:-1])
w_2.extend(df['wrong_match'].values[1:])
print('end of loop')
ldf_start = ldf_all.drop(ldf_all.groupby(level=0).tail(1).index)
ldf_end = ldf_all.drop(ldf_all.groupby(level=0).head(1).index)
ldf_end.set_index(ldf_start.index, inplace=True)
ldf_start = ldf_start.assign(u_match=u_edge, v_match=v_edge, wm1=w_1, wm2=w_2)
ldf_start.drop(['_id', 'n1', 'n2', 'wrong_match', 'time'], axis=1, inplace=True)
ldf_line = ldf_start.join(ldf_end[['lat', 'lon', 'speed', 'lon_acc', 'lat_acc', 'time', 'x', 'y',
'bearing']], lsuffix='_1', rsuffix='_2')
p1 = list(zip(*(ldf_line[f'lat_1'], ldf_line[f'lon_1'])))
p2 = list(zip(*(ldf_line[f'lat_2'], ldf_line[f'lon_2'])))
line_distlatlon = [round(distlatlon.distance(*xy), 3) for xy in tqdm(zip(p1, p2), total=len(p1))]
p1 = list(zip(*(ldf_line[f'y_1'], ldf_line[f'x_1'])))
p2 = list(zip(*(ldf_line[f'y_2'], ldf_line[f'x_2'])))
line_distyx = [round(distxy.distance(*xy), 3) for xy in tqdm(zip(p1, p2), total=len(p1))]
ldf_line['line_length_latlon'] = line_distlatlon
ldf_line['line_length_yx'] = line_distyx
print('Line length column added')
toc = time.time()
print(toc - tic)
return ldf_line
def make_point_trajectories(self):
ldf = self.tracks_point.copy()
if isinstance(ldf, pd.DataFrame):
ldf = make_gdf(ldf)
self.gdf_point = ldf
def make_line_trajectories(self):
ldf = self.tracks_line.copy()
if isinstance(ldf, pd.DataFrame):
ldf = make_gdf(ldf, line=True)
self.gdf_line = ldf
def select_rows(self, segment_index=None):
gdf_list = self.tracks_point
gdf_netw = self.network
if segment_index is None:
segment_index = gdf_netw._id.to_list()
traj_eval = []
for traj in tqdm(gdf_list):
tr = traj.drop(['lon', 'lat'], axis=1)
tr_first = tr.drop_duplicates('_id', keep='first')
idx_first = list(tr_first.index)
tr_first = pd.merge(tr_first, gdf_netw[['_id', 'lon1', 'lat1', 'length']].loc[segment_index]
, how='left', on=['_id'])
tr_first = tr_first.rename(columns={'lon1': 'lon', 'lat1': 'lat'})
tr_first = tr_first.assign(index=idx_first)
tr_last = tr.drop_duplicates('_id', keep='last')
idx_last = list(tr_last.index)
tr_last = pd.merge(tr_last, gdf_netw[['_id', 'lon2', 'lat2', 'length']].loc[segment_index]
, how='left', on=['_id'])
tr_last = tr_last.rename(columns={'lon2': 'lon', 'lat2': 'lat'})
tr_last = tr_last.assign(index=idx_last)
tr_sel = | pd.concat([tr_first, tr_last]) | pandas.concat |
import json
import copy
import unittest
import tempfile
import numpy as np
import pandas as pd
import uuid
from supervised.preprocessing.preprocessing_missing import PreprocessingMissingValues
from supervised.preprocessing.preprocessing_categorical import PreprocessingCategorical
from supervised.preprocessing.preprocessing_step import PreprocessingStep
class PreprocessingStepTest(unittest.TestCase):
def test_constructor_preprocessing_step(self):
preprocessing_params = {}
ps = PreprocessingStep(preprocessing_params)
self.assertTrue(len(ps._missing_values) == 0)
self.assertTrue(len(ps._categorical) == 0)
self.assertTrue(ps._categorical_y is None)
def test_exclude_missing_targets_all_good(self):
# training data
d = {
"col1": [1, 1, 1, 3],
"col2": [5, 6, 7, 0],
"col3": [1, 1, 1, 3],
"col4": [2, 2, 4, 3],
"y": [0, 1, 0, 1],
}
df = pd.DataFrame(data=d)
X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
y_train = df.loc[:, "y"]
ps = PreprocessingStep()
X_train, y_train = ps._exclude_missing_targets(X_train, y_train)
self.assertEqual(4, X_train.shape[0])
self.assertEqual(4, y_train.shape[0])
def test_exclude_missing_targets(self):
# training data
d = {
"col1": [1, 1, 1, 3],
"col2": [5, 6, 7, 0],
"col3": [1, 1, 1, 3],
"col4": [2, 2, 4, 3],
"y": [0, np.nan, 0, 1],
}
df = pd.DataFrame(data=d)
X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
y_train = df.loc[:, "y"]
ps = PreprocessingStep()
X_train, y_train = ps._exclude_missing_targets(X_train, y_train)
self.assertEqual(3, X_train.shape[0])
self.assertEqual(3, y_train.shape[0])
def test_run_exclude_missing_targets(self):
# training data
d = {
"col1": [1, 1, 1, 3],
"col2": [5, 6, 7, 0],
"col3": [1, 1, 1, 3],
"col4": [2, 2, 4, 3],
"y": [0, np.nan, 0, 1],
}
df = pd.DataFrame(data=d)
X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
y_train = df.loc[:, "y"]
ps = PreprocessingStep()
train_data, _ = ps.run(train_data={"X": X_train, "y": y_train})
X_train, y_train = train_data.get("X"), train_data.get("y")
self.assertEqual(3, X_train.shape[0])
self.assertEqual(3, y_train.shape[0])
def test_run_all_good(self):
# training data
d = {
"col1": [1, 1, 1, 3],
"col2": [5, 6, 7, 0],
"col3": [1, 1, 1, 3],
"col4": [2, 2, 4, 3],
"y": [0, 1, 0, 1],
}
df = pd.DataFrame(data=d)
X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
y_train = df.loc[:, "y"]
preprocessing_params = {
"columns_preprocessing": {
"col1": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col2": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col3": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col4": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
}
}
ps = PreprocessingStep(preprocessing_params)
train_data, _ = ps.run(train_data={"X": X_train, "y": y_train})
X_train, y_train = train_data.get("X"), train_data.get("y")
for col in ["col1", "col2", "col3", "col4"]:
self.assertTrue(col in X_train.columns)
params_json = ps.to_json()
self.assertFalse(params_json) # should be empty
def test_run_fill_median_convert_integer(self):
# training data
d = {
"col1": [1, 1, np.nan, 3],
"col2": ["a", "a", np.nan, "a"],
"col3": [1, 1, 1, 3],
"col4": ["a", "a", "b", "c"],
"y": [0, 1, 0, 1],
}
df = pd.DataFrame(data=d)
X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
y_train = df.loc[:, "y"]
preprocessing_params = {
"columns_preprocessing": {
"col1": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col2": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col3": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col4": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
}
}
ps = PreprocessingStep(preprocessing_params)
train_data, _ = ps.run(train_data={"X": X_train, "y": y_train})
X_train, y_train = train_data.get("X"), train_data.get("y")
for col in ["col1", "col2", "col3", "col4"]:
self.assertTrue(col in X_train.columns)
self.assertEqual(X_train["col1"][2], 1)
self.assertEqual(X_train["col2"][2], 0)
self.assertEqual(X_train["col4"][0], 0)
self.assertEqual(X_train["col4"][1], 0)
self.assertEqual(X_train["col4"][2], 1)
self.assertEqual(X_train["col4"][3], 2)
params_json = ps.to_json()
self.assertTrue("missing_values" in params_json)
self.assertTrue("categorical" in params_json)
self.assertTrue("categorical_y" not in params_json)
self.assertTrue("fill_params" in params_json["missing_values"][0])
self.assertEqual(
"na_fill_median", params_json["missing_values"][0]["fill_method"]
)
self.assertTrue("convert_params" in params_json["categorical"][0])
self.assertEqual(
"categorical_to_int", params_json["categorical"][0]["convert_method"]
)
def test_run_fill_median_convert_integer_validation_dataset(self):
# training data
d = {
"col1": [1, 1, np.nan, 3],
"col2": ["a", "a", np.nan, "a"],
"col3": [1, 1, 1, 3],
"col4": ["a", "a", "b", "c"],
"y": [0, 1, 1, 1],
}
df = pd.DataFrame(data=d)
X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
y_train = df.loc[:, "y"]
d_test = {
"col1": [1, 1, np.nan, 3],
"col2": ["a", "a", np.nan, "a"],
"col3": [1, 1, 1, 3],
"col4": ["a", "a", "b", "c"],
"y": [np.nan, 1, np.nan, 1],
}
df_test = pd.DataFrame(data=d_test)
X_test = df_test.loc[:, ["col1", "col2", "col3", "col4"]]
y_test = df_test.loc[:, "y"]
preprocessing_params = {
"columns_preprocessing": {
"col1": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col2": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col3": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col4": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
}
}
ps = PreprocessingStep(preprocessing_params)
train_data, validation_data = ps.run(
train_data={"X": X_train, "y": y_train},
validation_data={"X": X_test, "y": y_test},
)
X_train, y_train = train_data.get("X"), train_data.get("y")
X_test, y_test = validation_data.get("X"), validation_data.get("y")
for col in ["col1", "col2", "col3", "col4"]:
self.assertTrue(col in X_train.columns)
self.assertTrue(col in X_test.columns)
self.assertEqual(4, X_train.shape[0])
self.assertEqual(4, y_train.shape[0])
self.assertEqual(2, X_test.shape[0])
self.assertEqual(2, y_test.shape[0])
def test_run_on_y_only(self):
d = {"y": ["a", "b", "a", "b"]}
df = pd.DataFrame(data=d)
y_train = df.loc[:, "y"]
preprocessing_params = {
"target_preprocessing": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
]
}
ps = PreprocessingStep(preprocessing_params)
train_data, _ = ps.run(train_data={"y": y_train})
y_train = train_data.get("y")
self.assertEqual(4, y_train.shape[0])
self.assertEqual(0, y_train[0])
self.assertEqual(1, y_train[1])
def test_run_on_y_only_validation(self):
d = {"y": ["a", "b", "a", "b"]}
df = pd.DataFrame(data=d)
y_train = df.loc[:, "y"]
d_test = {"y": [np.nan, "a", np.nan, "b"]}
df_test = pd.DataFrame(data=d_test)
y_test = df_test.loc[:, "y"]
preprocessing_params = {
"target_preprocessing": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
]
}
ps = PreprocessingStep(preprocessing_params)
train_data, validation_data = ps.run(
train_data={"y": y_train}, validation_data={"y": y_test}
)
y_train = train_data.get("y")
y_test = validation_data.get("y")
self.assertEqual(4, y_train.shape[0])
self.assertEqual(2, y_test.shape[0])
self.assertEqual(0, y_train[0])
self.assertEqual(1, y_train[1])
self.assertEqual(0, y_test[0])
self.assertEqual(1, y_test[1])
def test_to_and_from_json_run_fill_median_convert_integer(self):
# training data
d = {
"col1": [1, 1, np.nan, 3],
"col2": ["a", "a", np.nan, "a"],
"col3": [1, 1, 1, 3],
"col4": ["a", "a", "b", "c"],
"y": [0, 1, 0, 1],
}
df = pd.DataFrame(data=d)
X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
y_train = df.loc[:, "y"]
preprocessing_params = {
"columns_preprocessing": {
"col1": [PreprocessingMissingValues.FILL_NA_MEDIAN],
"col2": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
"col4": [
PreprocessingMissingValues.FILL_NA_MEDIAN,
PreprocessingCategorical.CONVERT_INTEGER,
],
},
"target_preprocessing": [],
}
ps = PreprocessingStep(preprocessing_params)
train_data, _ = ps.run(train_data={"X": X_train, "y": y_train})
ps2 = PreprocessingStep()
ps2.from_json(ps.to_json())
del ps
d_test = {
"col1": [1, 1, np.nan, 3],
"col2": ["a", "a", np.nan, "a"],
"col3": [1, 1, 1, 3],
"col4": ["a", "a", "b", "c"],
"y": [np.nan, np.nan, 1, 1],
}
df_test = | pd.DataFrame(data=d_test) | pandas.DataFrame |
"""
Testing framework for the `ArrayCableInstallation` class.
"""
__author__ = ["<NAME>", "<NAME>"]
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
import pandas as pd
import pytest
from ORBIT import ProjectManager
from tests.data import test_weather
from ORBIT.core.library import extract_library_specs
from ORBIT.core.defaults import process_times as pt
from ORBIT.phases.install import ArrayCableInstallation
base_config = extract_library_specs("config", "array_cable_install")
simul_config = deepcopy(base_config)
_ = simul_config.pop("array_cable_bury_vessel")
@pytest.mark.parametrize(
"config", (base_config, simul_config), ids=["separate", "simultaneous"]
)
def test_simulation_setup(config):
sim = ArrayCableInstallation(config)
assert sim.env
@pytest.mark.parametrize(
"config", (base_config, simul_config), ids=["separate", "simultaneous"]
)
def test_vessel_initialization(config):
sim = ArrayCableInstallation(config)
assert sim.install_vessel
assert sim.install_vessel.cable_storage
if config.get("array_cable_bury_vessel", None):
assert sim.bury_vessel
@pytest.mark.parametrize(
"config", (base_config, simul_config), ids=["separate", "simultaneous"]
)
@pytest.mark.parametrize(
"weather", (None, test_weather), ids=["no_weather", "test_weather"]
)
def test_for_complete_logging(config, weather):
sim = ArrayCableInstallation(config, weather=weather)
sim.run()
df = | pd.DataFrame(sim.env.actions) | pandas.DataFrame |
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert o is None
else:
assert_series_equal(o, e)
def test_resample():
index = pd.date_range(start='20190101', freq='15min', periods=5)
arg = pd.Series([1, 0, 0, 0, 2], index=index)
idx_exp = pd.date_range(start='20190101', freq='1h', periods=2)
expected = pd.Series([0.25, 2.], index=idx_exp)
out = forecast.resample(arg)
assert_series_equal(out, expected)
assert forecast.resample(None) is None
@pytest.fixture
def rfs_series():
return pd.Series([1, 2],
index=pd.DatetimeIndex(['20190101 01', '20190101 02']))
@pytest.mark.parametrize(
'start,end,start_slice,end_slice,fill_method,exp_val,exp_idx', [
(None, None, None, None, 'interpolate', [1, 1.5, 2],
['20190101 01', '20190101 0130', '20190101 02']),
('20190101', '20190101 0230', None, None, 'interpolate',
[1, 1, 1, 1.5, 2, 2],
['20190101', '20190101 0030', '20190101 01', '20190101 0130',
'20190101 02', '20190101 0230']),
('20190101', '20190101 02', '20190101 0030', '20190101 0130', 'bfill',
[1., 1, 2], ['20190101 0030', '20190101 01', '20190101 0130'])
]
)
def test_reindex_fill_slice(rfs_series, start, end, start_slice, end_slice,
fill_method, exp_val, exp_idx):
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
| assert_series_equal(out, exp) | pandas.testing.assert_series_equal |
import pandas as pd
import tensorflow as tf
from pathlib import Path
from datetime import datetime
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import load_model
#environment settings
path = Path(__file__).parent.absolute()/'Deep Training'
name_data = 'none_'#''
metric = 'binary_accuracy'
minimise = False
#parameter settings
model_keys = ['optimizer','layers','activations','dropouts']
blueprint_keys = ['predictors','identifier']+model_keys
#log settings
log_keys = ['timestamp']+blueprint_keys+['dimensions','length','nodes','loss',metric,'time','epochs']
sort_fields = [metric, 'loss', 'epochs', 'nodes', 'time']
sort_conditions = [minimise, True, True, True, True]
predictor_log_path = path/'Logs'/(name_data+'predictor_evaluation_log.csv')
parameter_log_path = path/'Logs'/(name_data+'parameter_evaluation_log.csv')
re_parameter_log_path = path/'Logs'/(name_data+'re_parameter_evaluation_log.csv')
#model settings
models_path = path/'Models'
#data settings
data_path = path/'Data'
targets_name = 'None_Targets.csv'
predictors_name = 'None_Predictors.csv'
targets_columns = ['Home: Win','Visiting: Win']
predictors_columns = None
targets_index = False
predictors_index = False
#data environment
targets = pd.read_csv(data_path/targets_name, usecols=targets_columns, index_col=targets_index)
predictors = pd.read_csv(data_path/predictors_name, usecols=predictors_columns, index_col=predictors_index)
data_date = datetime.fromtimestamp((data_path/'Validation'/(name_data+'validation_targets.csv')).stat().st_mtime)
validation_targets = pd.read_csv(data_path/'Validation'/(name_data+'validation_targets.csv'), index_col=targets_index)
validation_predictors = pd.read_csv(data_path/'Validation'/(name_data+'validation_predictors.csv'), index_col=predictors_index)
training_targets = pd.read_csv(data_path/'Training'/(name_data+'training_targets.csv'), index_col=targets_index)
training_predictors = pd.read_csv(data_path/'Training'/(name_data+'training_predictors.csv'), index_col=predictors_index)
#gpu settings
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
#functions
def load_log(log_path):
#environment
log = pd.read_csv(log_path, index_col=False)
#procedure
if log.empty:
return log
for column in log.loc[:,log.dtypes==object]:
if (log[column][0].find('[')>-1 and log[column][0].find(']')>-1):
log[column] = log[column].str.replace("'",'').str.replace(', ',',').str.replace('[','').str.replace(']','').str.split(',')
if column=='layers' or column=='dropouts':
newCol = []
for element in log[column].tolist():
newElement = []
for value in element:
newElement.append(int(value))
newCol.append(newElement)
log[column] = pd.Series(newCol)
return log
def change_identifier(log_path, get_identifier_function):
#environment
log = load_log(log_path)
#procedure
print('Old:')
print(log[['predictors','identifier']])
log = log.to_dict('records')
for entry in log:
entry['identifier'] = get_identifier_function(entry['predictors'])
log = pd.DataFrame(log)
print('New:')
print(log[['predictors','identifier']])
return log
def find_duplicates():
#enviroment
predictor_log = load_log(predictor_log_path)
#procedure
duplicates = predictor_log[predictor_log.duplicated(keep=False, subset='identifier')]
return duplicates.sort_values(by=['identifier']+sort_fields, ascending=[True]+sort_conditions)
def drop_duplicates():
#enviroment
duplicates = find_duplicates().drop_duplicates(subset=['identifier'], keep='last')
log = load_log(predictor_log_path)
#procedure
return log.drop(duplicates.index)
def find_best(log_path,n=1):
#enviroment
log = load_log(log_path)
#procedure
best = log.sort_values(by=sort_fields, ascending=sort_conditions)
return best[:n]
def test_best(log_path):
#enviroment
best = find_best(log_path).to_dict('records')[0]
#procedeure
model_predictors = predictors[best['predictors']]
model_file = best['identifier']+'.h5'
model = load_model(models_path/model_file)
predictions = pd.DataFrame(model.predict(model_predictors), columns=targets.columns)
evaluation_frame = | pd.merge(targets, predictions, how='left', left_index=True, right_index=True, suffixes=('',' prediction')) | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 5 21:13:34 2016
@author: Marty
"""
from __future__ import absolute_import, print_function, division, unicode_literals
import unittest
from unittest import mock
import pandas as pd
from pandas.testing import assert_frame_equal
import numpy as np
from hydrofunctions import station, typing
from .fixtures import (
fakeResponse,
recent_only,
)
class TestingNWIS(station.NWIS):
"""
This subclass of NWIS is for testing all of the NWIS methods except
__init__, which we will replace. All of the other methods get inherited
verbatim, so we can test them using TestingNWIS instead of NWIS.
"""
def __init__(
self,
site=None,
service=None,
start_date=None,
end_date=None,
dataframe=None,
meta=None,
start=None,
end=None,
):
self.site = site
self.service = service
self.start_date = start_date
self.end_date = end_date
self._dataframe = dataframe
self.meta = meta
self.start = start
self.end = end
class TestStation(unittest.TestCase):
def test_station_is_obj(self):
actual = station.Station()
self.assertIsInstance(actual, station.Station)
def test_station_site_defaults_to_None(self):
actual = station.Station()
self.assertIsNone(actual.site)
def test_station_id_sets(self):
expected = "01234567"
actual = station.Station(expected)
another = station.Station("23456789")
self.assertEqual(actual.site, expected)
self.assertEqual(another.site, "23456789")
def test_station_dict_returns_dict(self):
actual = station.Station("first")
self.assertIsInstance(actual.station_dict, dict)
def test_multiple_instances_only_one_list(self):
first = station.Station("first")
second = station.Station("second")
self.assertEqual(first.station_dict, second.station_dict)
def test_station_dict_keeps_keys(self):
first = station.Station("first")
second = station.Station("second")
actual = first.station_dict
self.assertIn("first", actual)
self.assertIn("second", actual)
self.assertEqual(
len(actual),
2,
"The dict length is not equal to the \
number of instances",
)
def test_station_dict_returns_instance(self):
first = station.Station("first")
second = station.Station("second")
expected = first
# Look at the station_dict; does it contain a ref to 'first'?
actual = second.station_dict["first"]
self.assertEqual(actual, expected)
def test_station_subclasses_maintain_same_station_dict(self):
class Foo(station.Station):
pass
foo_inst = Foo("foo")
station_inst = station.Station("station")
self.assertIn("station", foo_inst.station_dict)
self.assertIn("foo", station_inst.station_dict)
actual = station_inst.station_dict["foo"]
self.assertIsInstance(actual, Foo)
class TestNWISinit(unittest.TestCase):
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
def test_NWIS_init_check_defaults(self, mock_get_nwis_property, mock_get_nwis):
default_site = None
default_service = "dv"
default_start = None
default_end = None
default_parameterCd = "all"
default_period = None
default_stateCd = None
default_countyCd = None
default_bBox = None
mock_get_nwis_property.return_value = "expected"
mock_get_nwis.return_value = fakeResponse()
station.NWIS()
mock_get_nwis.assert_called_once_with(
default_site,
default_service,
default_start,
default_end,
parameterCd=default_parameterCd,
period=default_period,
stateCd=default_stateCd,
countyCd=default_countyCd,
bBox=default_bBox,
)
self.assertTrue(mock_get_nwis_property)
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
def test_NWIS_init_calls_get_nwis_and_get_prop(
self, mock_get_nwis_property, mock_get_nwis
):
site = "expected site"
service = "expected service"
start = "expected start"
end = "expected end"
parameterCd = "expected pCode"
mock_get_nwis_property.return_value = "expected"
mock_get_nwis.return_value = fakeResponse()
station.NWIS(site, service, start, end, parameterCd=parameterCd)
mock_get_nwis.assert_called_once_with(
site,
service,
start,
end,
parameterCd=parameterCd,
period=None,
stateCd=None,
countyCd=None,
bBox=None,
)
self.assertTrue(mock_get_nwis_property)
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
@mock.patch("hydrofunctions.hydrofunctions.extract_nwis_df")
def test_NWIS_init_sets_url_ok_json(
self, mock_extract_nwis_df, mock_get_nwis_property, mock_get_nwis
):
expected_url = "expected url"
expected_ok = True
expected_json = "expected json"
mock_get_nwis.return_value = fakeResponse(
code=200, url=expected_url, json=expected_json
)
mock_df = pd.DataFrame(
np.random.randn(5, 1),
columns=["A"],
index=pd.date_range("20130101", periods=5, freq="T"),
)
mock_extract_nwis_df.return_value = (mock_df, "expected_dict")
actual = station.NWIS()
# self.assertEqual(actual.url, expected_url, "NWIS.__init__() did not set self.url properly.")
self.assertEqual(
actual.ok, expected_ok, "NWIS.__init__() did not set self.ok properly."
)
self.assertEqual(
actual.json,
expected_json,
"NWIS.__init__() did not set self.json properly.",
)
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
@mock.patch("hydrofunctions.hydrofunctions.extract_nwis_df")
def test_NWIS_init_calls_extract_nwis_df(
self, mock_extract_nwis_df, mock_get_nwis_property, mock_get_nwis
):
expected_json = "expected json"
mock_get_nwis.return_value = fakeResponse(json=expected_json)
mock_df = pd.DataFrame(
np.random.randn(5, 1),
columns=["A"],
index=pd.date_range("20130101", periods=5, freq="T"),
)
mock_extract_nwis_df.return_value = (mock_df, "expected dict")
actual = station.NWIS()
mock_extract_nwis_df.assert_called_once_with(expected_json)
@mock.patch("hydrofunctions.hydrofunctions.read_parquet")
def test_NWIS_init_filename_calls_read_parquet(self, mock_read):
expected_filename = "expected_filename"
expected_meta = "expected meta"
expected_df = pd.DataFrame(
np.random.randn(5, 1),
columns=["A"],
index=pd.date_range("20130101", periods=5, freq="T"),
)
mock_start = "expected start"
mock_end = "expected end"
mock_read.return_value = (expected_df, expected_meta)
actual = station.NWIS(file=expected_filename)
mock_read.assert_called_once_with(expected_filename)
assert_frame_equal(expected_df, actual._dataframe)
self.assertEqual(
expected_meta,
actual.meta,
"The metadata were not retrieved by NWIS.read().",
)
@mock.patch("hydrofunctions.hydrofunctions.read_parquet")
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
# @mock.patch("hydrofunctions.hydrofunctions.get_nwis_property")
@mock.patch("hydrofunctions.hydrofunctions.extract_nwis_df")
@mock.patch("hydrofunctions.hydrofunctions.save_parquet")
def test_NWIS_init_filename_calls_read_parquet_then_get_nwis(
self, mock_save, mock_extract_nwis_df, mock_get_nwis, mock_read
):
# Mocks listed in order that they get called.
# mock_read: pretend file doesn't exist, so return OSError
# file exists:
# mock_read.return_value = (expected_df, expected_meta)
# file doesn't exist, raise error:
mock_read.side_effect = OSError()
# mock_get_nwis
expected_json = "expected json"
mock_get_nwis.return_value = fakeResponse(json=expected_json)
# mock_get_nwis_property
# never called
# mock_extract_nwis_df
mock_df = pd.DataFrame(
np.random.randn(5, 1),
columns=["A"],
index=pd.date_range("20130101", periods=5, freq="T"),
)
mock_meta = "mock meta"
mock_extract_nwis_df.return_value = (mock_df, mock_meta)
# mock_save
expected_filename = "expected_filename"
mock_save.return_value = "expected self"
# Create an NWIS with a filename, but the filename doesn't exist.
# so an OSError is returned.
# So now get_nwis is called, extract_nwis_df, save().
actual = station.NWIS(file=expected_filename)
mock_save.assert_called_once_with(expected_filename, mock_df, mock_meta)
@mock.patch("hydrofunctions.hydrofunctions.get_nwis")
def test_NWIS_init_request_most_recent_only(self, mock_get_nwis):
expected_json = recent_only
expected_url = (
"https://waterservices.usgs.gov/nwis/dv/?format=json%2C1.1&sites=01541200"
)
mock_get_nwis.return_value = fakeResponse(json=expected_json, url=expected_url)
actual = station.NWIS("01541200")
self.assertEqual(
actual.df().shape,
(2, 4),
"The dataframe should only have two rows and four columns.",
)
class TestNWISmethods(unittest.TestCase):
"""
Tests for NWIS methods
The following section is for testing all of the NWIS methods
EXCEPT the NWIS.__init__() method.
Creating an NWIS instance will always run the __init__ method, which we
usually don't want to do. It calls a bunch of functions that we test
elsewhere and it causes a bunch of side effects that we don't want. Yes,
you can mock all of the functions that __init__ calls, but even then there
can be unwanted side effects, not to mention it can be tedious to mock so
many different things.
To test any method other than __init__, we will use the following strategy:
- create a sub-class of NWIS called TestingNWIS.
- TestingNWIS has a different __init__ method that allows you to pass
in a dataframe and any other initial parameter
- all other methods get inherited from NWIS, so we can test them.
"""
def test_NWIS_df_returns_all_columns(self):
expected_cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=expected_cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df()
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols, expected_cols, "NWIS.df() should return all of the columns."
)
def test_NWIS_df_all_returns_all_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = cols
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("all")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('all') should return all of the columns.",
)
def test_NWIS_df_data_returns_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = [
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("data")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('data') should return all of the data columns.",
)
def test_NWIS_df_discharge_returns_discharge_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = ["USGS:01541200:00060:00000", "USGS:01541303:00060:00000"]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("discharge")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('discharge') should return all of the discharge data columns.",
)
def test_NWIS_df_q_returns_discharge_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = ["USGS:01541200:00060:00000", "USGS:01541303:00060:00000"]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("q")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('q') should return all of the discharge data columns.",
)
def test_NWIS_df_stage_returns_stage_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = ["USGS:01541200:00065:00000", "USGS:01541303:00065:00000"]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("stage")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('stage') should return all of the stage data columns.",
)
def test_NWIS_df_flags_returns_qualifiers_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00065:00000_qualifiers",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("flags")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('flags') should return all of the qualifier columns.",
)
def test_NWIS_df_flags_q_returns_discharge_qualifiers_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541303:00060:00000_qualifiers",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("flags", "q")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('flags', 'q') should return all of the discharge qualifier columns.",
)
def test_NWIS_df_stage_flags_returns_stage_qualifiers_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = [
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541303:00065:00000_qualifiers",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("stage", "flags")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('stage', 'flags') should return all of the stage qualifier columns.",
)
def test_NWIS_df_crazy_input_raises_ValueError(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
with self.assertRaises(
ValueError,
msg="unmatched args such as NWIS.df('crazy', 'input') should cause NWIS.df() to raise a ValueError.",
):
actual_df = test_nwis.df("discharge", "crazy", "input")
def test_NWIS_df_5digits_returns_param_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = ["USGS:01541200:00065:00000", "USGS:01541303:00065:00000"]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("00065")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('00065') should return all of the 00065 data columns.",
)
def test_NWIS_df_5digits_and_flags_returns_00065_qualifiers_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = [
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541303:00065:00000_qualifiers",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("00065", "flags")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('00065', 'flags') should return all of the 00065 _qualifiers columns.",
)
def test_NWIS_df_5digits_no_match_raises_ValueError(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
with self.assertRaises(
ValueError,
msg="A five-digit input that doesn't match a paramCode should raise a ValueError.",
):
actual_df = test_nwis.df("00000")
def test_NWIS_df_6digits_raises_ValueError(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
with self.assertRaises(
ValueError,
msg="A six-digit input like .df('123456') should raise a ValueError.",
):
actual_df = test_nwis.df("123456")
def test_NWIS_df_7digits_raises_ValueError(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
with self.assertRaises(
ValueError,
msg="A seven-digit input like .df('1234567') should raise a ValueError.",
):
actual_df = test_nwis.df("1234567")
def test_NWIS_df_8digits_returns_station_data_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = ["USGS:01541200:00060:00000", "USGS:01541200:00065:00000"]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("01541200")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('01541200') should return all of the data columns for station 01541200.",
)
def test_NWIS_df_8digits_and_flags_returns_station_qualifiers_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00065:00000_qualifiers",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
actual_df = test_nwis.df("01541200", "flags")
actual_cols = actual_df.columns.tolist()
self.assertListEqual(
actual_cols,
expected_cols,
"NWIS.df('01541200', 'flags') should return all of the _qualifiers columns for station '01541200'.",
)
def test_NWIS_df_8digits_no_match_raises_ValueError(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = pd.DataFrame(data=data, columns=cols)
test_nwis = TestingNWIS(dataframe=test_df)
with self.assertRaises(
ValueError,
msg="An eight-digit input that doesn't match a station id should raise a ValueError.",
):
actual_df = test_nwis.df("12345678")
def test_NWIS_df_two_sites_returns_two_site_columns(self):
cols = [
"USGS:01541200:00060:00000_qualifiers",
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000_qualifiers",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000_qualifiers",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000_qualifiers",
"USGS:01541303:00065:00000",
]
expected_cols = [
"USGS:01541200:00060:00000",
"USGS:01541200:00065:00000",
"USGS:01541303:00060:00000",
"USGS:01541303:00065:00000",
]
data = [
["test", 5, "test", 5, "test", 5, "test", 5],
["test", 5, "test", 5, "test", 5, "test", 5],
]
test_df = | pd.DataFrame(data=data, columns=cols) | pandas.DataFrame |
import os
import json
import tzlocal
import numpy as np
import pandas as pd
from Fetcher import Dataset
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing
# global variable
list_town = ["Miami", "New York", "Las Vegas", "Chicago", "Seattle", "San Francisco", "Washington","New Orleans", "Palm Springs", "San Diego", "Charleston"]
list_dataset = ["temp", "rainfall", "snowfall", "wind", "solar"]
def process_download():
d = Dataset()
print("---------------------------------------------------------------------")
for town in list_town:
for dataset in list_dataset[:1]:
print(f"-dataset: {dataset} -town: {town}")
res = d.download(town, dataset) # Each download overwrites the constructor's stored town and dataset_code with the new values
d.save(res)
def training(days=30):
d = Dataset()
final_df = []
for dataset in list_dataset[:1]:
for town in list_town:
print(f"dataset/{dataset}/{town}.json")
if os.path.exists(f"dataset/{dataset}/{town}.json"):
print(f"{dataset} - {town}")
res = json.load(open(f"dataset/{dataset}/{town}.json"))
data = res["data"]
index = pd.to_datetime(list(data.keys()))
values = [float(s) if s else None for s in data.values()]
series = pd.Series(values, index=index)
df = series.to_frame(name='Value')
df = df[~df.index.astype(str).str.contains('02-29')]
#algorithm
print("Training...")
hw_model = ExponentialSmoothing(df["Value"],
trend ="add",
seasonal = "add",
seasonal_periods=365,
damped=False
).fit(use_boxcox="log") # damped=False
hw_fitted = hw_model.fittedvalues
hw_resid = hw_model.resid
days_in_future = days  # honour the function's days argument
# Adding the mean of the residuals to correct the bias.
py_hw = hw_model.forecast(days_in_future) + np.mean(hw_resid)
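# i.e. forecast_t + mean(y_t - fitted_t): a constant shift that removes the
# average in-sample bias of the fitted model.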
# to frame
df = py_hw.to_frame()
# get lat and long
lat, long = d.get_lat_lon(town)
df["lat"] = str(lat)
df["long"] = str(long)
df["town"] = str(town)
final_df.append(df)
df = None
to_save = | pd.concat(final_df) | pandas.concat |
# -*- coding: utf-8 -*-
"""Precily.ipynb
Automatically generated by Colaboratory.
Author: <NAME>
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf #version 2.0
import tensorflow_hub as hub
from math import *
import numpy as np
import pandas as pd
#tf.random.set_seed(1)
#reading dataset from csv file
data=pd.read_csv("Text_Similarity_Dataset.csv")
print("\n[OUT]: Top 5 rows of data:\n",data.head(5))
#url of pretrained model present in TensorFlow hub
url="https://tfhub.dev/google/universal-sentence-encoder-large/3"
#getting model
embed =hub.KerasLayer(url)
#getting embeddings of the text1 and text2 present in row 0
embeddings=embed(np.array([data.iloc[0]['text1'],data.iloc[0]['text2']]))
#how the paragraphs in row 0 look
print("text1 of row 0: \n",data.iloc[0]['text1'])
print("text2 of row 0: \n",data.iloc[0]['text2'])
#embeddings of each text represented by 512 values
print("\n [OUT]: Emdedding(Tensor representation) of text1 in row 0: \n",embeddings[0])
print("\n Similarity of the text0 and text1 of row 0: ", np.inner(embeddings[0],embeddings[1]))
print("\n Taking four digit after decimal",round(np.inner(embeddings[0],embeddings[1]),4))
print("\n[OUT]: Total number of data Samples: ",data.shape[0])
# I have taken the similarity score up to 5 places after the decimal
# Here only the plain inner product is used to find the similarity between the two vectors
def calculate_STS_Inner_product():
d={"Unique_ID":[],"Similarity_Score":[]}
for i in range(data.shape[0]):
embeddings=embed(np.array([data.iloc[i]['text1'],data.iloc[i]['text2']]))
d["Unique_ID"].append(i)
d["Similarity_Score"].append(round(max(np.inner(embeddings[0],embeddings[1]),0.0),5))
return d
#calling the function to get Similarity Score
STS_in_p=calculate_STS_Inner_product()
print("\n[OUT]: Number of values in Similarity Score list: ",len(STS_in_p["Similarity_Score"]))
print("\n[OUT]: Semantic Textual Similarity:\n Unique_ID: ",STS_in_p["Unique_ID"])
print("\n Similarity Score: ",STS_in_p["Similarity_Score"])
#converting result STS_in_p to dataframe to convert it to csv file
dfr= | pd.DataFrame(STS_in_p) | pandas.DataFrame |
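# --- Hedged note (added for illustration) ---
# Universal Sentence Encoder vectors are approximately unit-length, so the raw inner product
# used above is close to cosine similarity; an explicit cosine version for comparison:
def cosine_similarity(u, v):
    u, v = np.asarray(u), np.asarray(v)
    # normalize by both magnitudes so the score stays in [-1, 1] even for unnormalized vectors
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))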
import pandas as pd
import numpy as np
file1 = '../data/F9.xlsx'
x1 = pd.ExcelFile(file1)
feature = x1.parse('Sheet1')
print(feature.shape)
feature = feature.drop(['\'DAY_OF_DISCHARGE\''], axis=1)
feature = feature.drop(['\'FOLLOW_UP_3WEEKS\''], axis=1)
feature = feature.drop(['\'FOLLOW_UP_8WEEKS\''], axis=1)
feature = feature.drop(['PREOP_PAIN'], axis=1)
feature = feature.drop(['CHANGE_DISCHARGE'], axis=1)
feature = feature.drop(['CHANGE_FOLLOWUP_3'], axis=1)
feature = feature.drop(['CHANGE_FOLLOWUP_8'], axis=1)
print(feature.shape)
file2 = '../data/PAIN_SCORES.xlsx'
x2 = | pd.ExcelFile(file2) | pandas.ExcelFile |
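# Hedged sketch (added for illustration; sheet name and join key are assumptions):
# the pain-score workbook is presumably parsed the same way and joined to the feature table, e.g.
#
#   pain = x2.parse('Sheet1')
#   merged = feature.merge(pain, left_index=True, right_index=True)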
import pandas as pd
import os, requests, logging
import sys
# from bs4 import BeautifulSoup as bs
from .utils import *
# dask.dataframe is referenced as `dd` in _update_master_db below; import it here explicitly
# (unless it is already re-exported by .utils)
import dask.dataframe as dd
class EdgarBase(object):
def __init__(self, dir_edgar=None):
# self.dir_edgar =
# self.__dir_download = None
# self.__dir_data = None
self.__dir_output = None
self.ulr_sec = 'https://www.sec.gov/Archives/'
self.__dir_config = None
self.dir_curr = os.path.abspath(os.path.dirname(__file__))
self.dir_config = os.path.join(self.dir_curr, 'config')
self.today = pd.datetime.today()
self.__fact_mapping = None
self.__dir_edgar = dir_edgar
self.__cache_file = {}
@property
def dir_edgar(self):
if self.__dir_edgar is None:
logger.error('please set output data directory ')
if 'DIR_EDGAR' not in os.environ:
logger.error('please set environment variable DIR_EDGAR')
logger.error("os.environ['DIR_EDGAR']=/path/to/dir'")
import tempfile
self.__dir_edgar = tempfile.gettempdir()
else:
self.__dir_edgar = os.environ['DIR_EDGAR']
return self.__dir_edgar
def set_dir_edgar(self, dir_edgar):
if not os.path.exists(dir_edgar):
os.makedirs(dir_edgar)
self.__dir_edgar = dir_edgar
return self
@property
def _dir_download(self):
# dir_download = os.path.join(self.dir_edgar, 'download')
# if not os.path.isdir(dir_download):
# os.makedirs(dir_download)
return self.dir_edgar
def set_dir_config(self, dir_input):
logger.info('setting dir_config={f}'.format(f=dir_input))
self.dir_curr = dir_input
@property
def fact_mapping(self):
if self.__fact_mapping is None:
path_fact_mapping = os.path.join(self.dir_config, 'fact_mapping.csv')
logger.info('reading fact_mapping from {f}'.format(f=path_fact_mapping))
fm = pd.read_csv(path_fact_mapping).set_index('item')
self.__fact_mapping = fm
else:
fm = self.__fact_mapping
return fm
def get_cik(self, ticker):
return ticker2cik(ticker)
def get_filing_path(self, ticker, filing_type=None, start_date=None, end_date=None):
"""
:param ticker:
:param filing_type: '10-K', '10-Q', etc...
:param start_date: str or datetime
:param end_date: str or datetime
:return: data frame columns=ticker|cik|filing_type|date|filepath
"""
pass
def parse_filing(self, filepath, section):
pass
def reindex_master(self, start_date=None, end_date=None):
pass
class EdgarDownloader(EdgarBase):
def __init__(self, dir_edgar):
super(EdgarDownloader, self).__init__(dir_edgar)
self.__conn_master_db = None
self.valid_form_type = ['10-Q', '10-K', '8-K']
def __exit__(self):
self._close_master_db()
@property
def _dir_master(self):
dir_master = os.path.join(self.dir_edgar, 'master')
if not os.path.isdir(dir_master):
os.makedirs(dir_master)
return dir_master
@property
def conn_master_db(self):
file_master_db = os.path.join(self.dir_edgar, 'master_idx.db')
if self.__conn_master_db is None:
import sqlite3
if not os.path.exists(file_master_db):
conn = sqlite3.connect(file_master_db)
pd.DataFrame().to_sql('master_idx', conn)
else:
conn = sqlite3.connect(file_master_db)
self.__conn_master_db = conn
return self.__conn_master_db
def _close_master_db(self):
conn = self.__conn_master_db
conn.close()
self.__conn_master_db = None
def load_master_db(self, start_date, end_date=None, force_reload=False):
#start_date = pd.to_datetime(str(start_date))
#end_date = pd.datetime.today() if end_date is None else pd.to_datetime(str(end_date))
list_yyyyqq = self._yyyyqq_between(start_date, end_date)
"edgar/full-index/{yyyy}/QTR{q}/master.idx"
list_file_master = ["edgar/full-index/{y}/QTR{q}/master.idx".format(y=yq.split('Q')[0], q=yq.split('Q')[1])
for yq in list_yyyyqq]
#list_file_download = [f for f in list_file_master if not os.path.exists(f) or force_reload]
list_file_downloaded = download_list(list_file_master, self.dir_edgar, force_download=force_reload)
self._update_master_db(list_file_downloaded)
def _update_master_db(self, list_files):
conn = self.conn_master_db
col_names = ['cik', 'company_name', 'form_type', 'date_filed', 'filename']
dfs = dd.read_csv(list_files, sep='|', skiprows=11, header=None)
dfs.columns = col_names
df_load = dfs[dfs['form_type'].isin(self.valid_form_type)].compute()
sql_all = 'select * from master_idx'
df_all = pd.read_sql_query(sql_all, conn)
logger.info('read master_idx db, n={s}'.format(s=df_all.shape[0]))
df_all = pd.concat([df_all, df_load], sort=False).drop_duplicates()
df_all.to_sql('master_idx', conn, if_exists='replace', index=False)
logger.info('write master_idx db, n={s}'.format(s=df_all.shape[0]))
return 0
# def _refresh_master_idx(self, yyyy, q, force=False):
# # yyyy, q = self._year_quarter(date)
# file_master = os.path.join(self._dir_master, "{y}_QTR{q}_master.csv".format(y=yyyy, q=q))
# if not os.path.exists(file_master) or force:
# url_master = self._url_master_idx(yyyy, q)
# logger.info('downloading {f}'.format(f=url_master))
# resp = req.get(url_master)
# if resp.status_code != 200:
# logger.error('error downloading {f}'.format(f=url_master))
# else:
# write_data = '\n'.join(resp.content.decode('latin1').split('\n')[11:])
# logger.info('saving {f}'.format(f=file_master))
# with open(file_master, 'w+', encoding='utf-8') as f:
# f.write("cik|company|form_type|file_date|file_name\n")
# f.write(write_data)
# self._update_master_db([file_master])
# else:
# logger.info('use existing file. {f}'.format(f=file_master))
# return file_master
def filings_between(self, symbol, start_date, end_date=None, form_type='10-K', download=True):
#list_year_quarter = self._yyyyqq_between(start_date, end_date)
#list_master_file = [self._refresh_master_idx(t.split('Q')) for t in list_year_quarter]
# dfs = dd.read_csv(list_master_file, sep='|')
cik = int(ticker2cik(symbol))
# df_res = dfs[(dfs.cik == cik) & (dfs.form_type == form_type)].compute()
sql_filings = "select * from master_idx where cik=={cik} and form_type=='{f}' " \
"and date_filed>='{t0}' ".format(cik=cik, f=form_type, t0=pd.to_datetime(start_date).date())
if end_date:
sql_filings += "and file_date<'{t1}'".format(t1=pd.to_datetime(end_date).date())
df_res = pd.read_sql_query(sql_filings, self.conn_master_db)
list_filename = df_res['filename'].tolist()
if download:
list_filename = download_list(list_filename, self._dir_download, force_download=True)
return list_filename
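# Hedged usage sketch (added for illustration; ticker, dates and directory are assumptions):
#
#   ed = EdgarDownloader("/tmp/edgar")
#   ed.load_master_db("2019-01-01", "2019-12-31")
#   files = ed.filings_between("AAPL", "2019-01-01", form_type="10-K")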
# @staticmethod
# def _url_master_idx(yyyy, q):
# url = "https://www.sec.gov/Archives/edgar/full-index/{yyyy}/QTR{q}/master.idx".format(yyyy=yyyy, q=q)
# return url
# @staticmethod
# def _year_quarter(date=pd.datetime.today()):
# t = pd.to_datetime(date).date()
# return t.year, (t.month - 1) // 3 + 1
@staticmethod
def _yyyyqq(date):
yq = pd.Period(pd.to_datetime(str(date)), freq='Q')
return str(yq)
def _yyyyqq_between(self, start_date, end_date=None):
end_date = | pd.datetime.today() | pandas.datetime.today |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: | pd.Timestamp("2013-03-26 00:00:00") | pandas.Timestamp |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from scipy import signal
import data_processing0 as dp
import datetime
import math
from scipy.spatial.distance import pdist, squareform
DATA_PATH = "sitaiqu/samples_image/"
import os
if not os.path.isdir(DATA_PATH):
os.makedirs(DATA_PATH)
if not os.path.isdir("sitaiqu/samples/"):
os.makedirs("sitaiqu/samples/")
begin = "2014/08/01"
end = "2016/08/01"
ID=1504523749
def rec_plot(s, eps=None, steps=None):
if eps is None: eps = 0.1
if steps is None: steps = 100
d = pdist(s[:,None])
d = np.floor(d/eps)
d[d>steps] = steps
Z = squareform(d)
return Z
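# Hedged demo (added for illustration; not called anywhere in the original script):
# render a recurrence plot of a toy signal and save it next to the other sample images.
def _demo_rec_plot():
    z = rec_plot(np.sin(np.linspace(0, 8 * np.pi, 200)))
    plt.imshow(z, cmap='gray')
    plt.savefig(os.path.join(DATA_PATH, 'example_recurrence_plot.png'))
    plt.close()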
def single_input(id=ID, DATA_PATH = DATA_PATH, begin = begin, end = end,mode=0, df=None):
filename = dp.DATA_PATH + "kilowatt_everyday_2year.xlsx"
dtypes = {'index': 'int', 'redidentsID': 'str', 'userID': 'str', 'meterID': 'str', 'misc': 'str', 'sequence': 'str'}
parse_dates = ['date']
df = df[df['meterID'] == id]
res = pd.DataFrame()
res['id'] = df['meterID']
res['usage'] = df['usage']
res['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
"""
Testing that functions from rpy work as expected
"""
import pandas as pd
import numpy as np
import unittest
import nose
import pandas.util.testing as tm
try:
import pandas.rpy.common as com
from rpy2.robjects import r
import rpy2.robjects as robj
except ImportError:
raise nose.SkipTest('R not installed')
class TestCommon(unittest.TestCase):
def test_convert_list(self):
obj = r('list(a=1, b=2, c=3)')
converted = | com.convert_robj(obj) | pandas.rpy.common.convert_robj |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 08:44:53 2021
@author: <NAME>
"""
# from __future__ import division
from tensorflow.keras.models import Sequential
# from nltk.book import *
import pandas as pd
import numpy as np
np.set_printoptions(threshold=np.inf) # print numpy arrays in full instead of truncating them
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib
# from matplotlib import pyplot
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
import tensorflow as tf
from pandas import DataFrame
matplotlib.rc("font", family='YouYuan') # 设置画图字体 不然中文乱码
from sklearn.preprocessing import MinMaxScaler
# The functions implemented below (modularized)
# Fetch-and-merge function for the factors: only df1 and df2 are really used; df3 is barely used (unclear how to incorporate the weather data)
def My_GetAndMerge():
df1 = pd.read_csv(r"E:\deep-learning\course-design-main\resources\trips.csv", encoding='gbk');
df1['进站时间'] = pd.to_datetime(df1['进站时间'])
df1 = df1.set_index(df1["进站时间"])
df2 = pd.read_csv(r"E:\deep-learning\course-design-main\resources\hoilday2020.csv", encoding='gbk');
df3 = pd.read_csv(r"E:\deep-learning\course-design-main\resources\weather2020.csv", encoding='gbk');
df1['day'] = pd.to_datetime(df1['进站时间']).dt.date
df2['day'] = pd.to_datetime(df2['day']).dt.date
df3['day'] = | pd.to_datetime(df3['day']) | pandas.to_datetime |
from contextlib import ExitStack as does_not_raise # noqa: N813
import numpy as np
import pandas as pd
import pytest
from sid.msm import _flatten_index
from sid.msm import _harmonize_input
from sid.msm import _is_diagonal
from sid.msm import get_diag_weighting_matrix
from sid.msm import get_flat_moments
from sid.msm import get_msm_func
def dummy_simulate(_params): # noqa: U101
return pd.Series([1, 2])
def dummy_calc_moments(df):
return df
@pytest.mark.end_to_end
def test_estimation_with_msm():
s = pd.Series([1, 2])
msm_func = get_msm_func(
simulate=dummy_simulate,
calc_moments=dummy_calc_moments,
empirical_moments=s,
replace_nans=lambda x: x,
additional_outputs={"sr": lambda x: x},
)
result = msm_func(None)
expected = {
"value": 0,
"root_contributions": pd.Series([0.0, 0.0], ["0_0", "0_1"]),
"empirical_moments": {0: s},
"simulated_moments": {0: s},
"sr": pd.Series([1, 2]),
}
for k, v in result.items():
if k == "value":
assert v == expected[k]
else:
if isinstance(v, dict):
for kk, vv in v.items():
vv.equals(expected[k][kk])
else:
v.equals(expected[k])
@pytest.mark.integration
@pytest.mark.parametrize(
"empirical_moments, weights, expected",
[({"a": | pd.Series([1]) | pandas.Series |
# Utilities
import re
import pickle
import numpy as np
import pandas as pd
import tensorflow.keras.metrics as metrics
from gensim.models import KeyedVectors
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Bidirectional, GlobalMaxPool1D, Dense, LSTM, Conv1D, Embedding
from tensorflow.keras.models import save_model
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau,TensorBoard
# Importing the dataset
DATASET_ENCODING = "ISO-8859-1"
raw_dataset = pd.read_csv('assets/twitter_nlp/training.1600000.processed.noemoticon.csv',encoding=DATASET_ENCODING)
dataset = pd.DataFrame(columns=["Sentiment", "SentimentText"])
dataset = dataset.fillna(0)
negative = 0
positive = 0
for index, row in raw_dataset.iterrows():
if negative < 50000 and row[0] == 0:
dataset["Sentiment"] = row[0]
dataset["SentimentText"] = row[5]
negative+=1
elif positive < 50000 and row[0] == 4:
dataset["Sentiment"] = row[0]
dataset["SentimentText"] = row[5]
positive+=1
elif positive == 50000 and negative == 50000:
break
# Reading contractions.csv and storing it as a dict.
contractions = | pd.read_csv('assets/contractions.csv', index_col='Contraction') | pandas.read_csv |
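# Hedged sketch (added for illustration): one way the contractions table might be applied,
# assuming it has a 'Meaning' column mapping each contraction to its expansion.
#
#   contractions_dict = contractions.to_dict()['Meaning']
#   def expand_contractions(text):
#       return ' '.join(contractions_dict.get(word.lower(), word) for word in text.split())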
import pandas as pd
import numpy as np
import pickle
from scipy.sparse import *
from sklearn.model_selection import train_test_split
SEED = 5525
def update_index(df):
index_set = set()
for i in df.tolist():
index_set.update(set(i))
indices = list(index_set)
indices.sort()
return indices
def split_dataset(dataset):
with open(dataset, 'rb') as handle:
df = pickle.load(handle)
#df = df.iloc[:10000,]
df = df[df['TEXT_ID'].map(len)>=2]
X = df.iloc[:, 0]
Y = df.iloc[:, 1]
indices = update_index(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=SEED)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=SEED)
return X_train, X_test, X_val, Y_train, Y_test, Y_val, indices
def transfer_to_csr(raw_df, num_col, indices):
shape_csr = (len(raw_df), num_col)
row = []
col = []
row_idx = 0
for emoji_ids in raw_df.tolist():
tempEmoji = set(emoji_ids)
row += [row_idx] * len(tempEmoji)
idx = [indices.index(i) for i in tempEmoji]  # index the deduplicated set so row and col stay the same length
col += idx
row_idx += 1
data = [1]*len(row)
return csr_matrix((data, (row, col)), shape=shape_csr)
def make_same_length(data, sequence_length):
features = np.zeros((len(data), sequence_length), dtype=int)
for i, text in enumerate(data):
text_len = len(text)
if (text_len <= sequence_length):
zeros = list(np.zeros(sequence_length - text_len))
new = text + zeros
else:
new = text[:sequence_length]
features[i, :] = np.array(new)
return features
def load_data_nn(dataset, num_class):
X_train, X_test, X_val, Y_train, Y_test, Y_val, indices = split_dataset(dataset)
X_train = make_same_length(X_train, 128)
X_test = make_same_length(X_test, 128)
X_val = make_same_length(X_val, 128)
# Y_train = transfer_to_csr(Y_train, num_class, indices).todense()
# Y_test = transfer_to_csr(Y_test, num_class, indices).todense()
# Y_val = transfer_to_csr(Y_val, num_class, indices).todense()
Y_train = transfer_to_csr(Y_train, num_class, indices)
Y_test = transfer_to_csr(Y_test, num_class, indices)
Y_val = transfer_to_csr(Y_val, num_class, indices)
num_pos = np.sum(Y_train, axis=0) + np.sum(Y_test, axis=0) + np.sum(Y_val, axis=0)
# weight_pos = ((len(Y_train) + len(Y_test) + len(Y_val)) - num_pos) / num_pos
weight_pos = ((Y_train.shape[0] + Y_test.shape[0] + Y_val.shape[0]) - num_pos) / num_pos
return X_train, X_test, X_val, Y_train, Y_test, Y_val, weight_pos
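# Hedged usage sketch (added for illustration; the pickle path and class count are assumptions):
#
#   X_tr, X_te, X_va, Y_tr, Y_te, Y_va, w_pos = load_data_nn("emoji_dataset.pkl", num_class=50)
#   print(X_tr.shape, Y_tr.shape, w_pos.shape)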
# list = annotations of 50 most frequently used emojis
def locate_emojis(dataset, list, outfile):
file = pd.read_csv(dataset)
emoji_id = file.iloc[:,0]
emojis = []
for i in list:
emojis.append([i, emoji_id[i]])
emojis = | pd.DataFrame(emojis) | pandas.DataFrame |
import pandas as pd
import glob
## concatenate data frames into one
path = "HMXB_output/*"
all_param_files = glob.glob(path)
#df = pd.read_csv("./0_params.csv")
df = pd.DataFrame()
for pfile in all_param_files:
#if pfile == "0_params.csv": continue
pf = | pd.read_csv(pfile) | pandas.read_csv |
# (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import copy
import datetime
import os
import numpy as np
import pandas as pd
import pytest
import metview as mv
from metview import bindings
from metview.param import ParamInfo
from metview.indexer import GribIndexer
PATH = os.path.dirname(__file__)
DB_COLUMNS = copy.deepcopy(GribIndexer.DEFAULT_KEYS)
DB_COLUMNS["_msgIndex1"] = ("l", np.int64, False)
DB_COLUMNS_WIND2 = copy.deepcopy(DB_COLUMNS)
DB_COLUMNS_WIND2["_msgIndex2"] = ("l", np.int64, False)
DB_DEFAULT_COLUMN_NAMES = list(GribIndexer.DEFAULT_KEYS.keys())
def file_in_testdir(filename):
return os.path.join(PATH, filename)
def build_index_db_dataframe(column_data, key_def=None):
c = {v: column_data[i] for i, v in enumerate(list(key_def.keys()))}
pd_types = {k: v[1] for k, v in key_def.items()}
return pd.DataFrame(c).astype(pd_types)
def test_fieldset_select_single_file():
f = mv.read(file_in_testdir("tuv_pl.grib"))
assert f._db is None
# ------------------------
# single resulting field
# ------------------------
g = f.select(shortName="u", level=700)
assert len(g) == 1
assert mv.grib_get(g, ["shortName", "level:l"]) == [["u", 700]]
g1 = mv.read(data=f, param="u", levelist=700)
d = g - g1
assert np.allclose(d.values(), np.zeros(len(d.values())))
# check index db contents
assert g._db is not None
assert "scalar" in g._db.blocks
assert len(g._db.blocks) == 1
md = [
["u"],
[131],
[20180801],
[1200],
["0"],
[700],
["isobaricInhPa"],
["0"],
["0001"],
["od"],
["oper"],
["an"],
[0],
]
df_ref = build_index_db_dataframe(md, key_def=DB_COLUMNS)
# print(df_ref.dtypes)
# print(g._db.blocks)
df = g._db.blocks["scalar"]
# print(df.dtypes)
if not df.equals(df_ref):
print(df.compare(df_ref))
assert False
# ------------------------------------
# single resulting field - paramId
# ------------------------------------
g = f.select(paramId=131, level=700)
assert len(g) == 1
assert mv.grib_get(g, ["paramId:l", "level:l"]) == [[131, 700]]
g1 = mv.read(data=f, param="131.128", levelist=700)
d = g - g1
assert np.allclose(d.values(), np.zeros(len(d.values())))
# check index db contents
assert g._db is not None
assert "scalar" in g._db.blocks
assert len(g._db.blocks) == 1
md = [
["u"],
[131],
[20180801],
[1200],
["0"],
[700],
["isobaricInhPa"],
["0"],
["0001"],
["od"],
["oper"],
["an"],
[0],
]
df_ref = build_index_db_dataframe(md, key_def=DB_COLUMNS)
df = g._db.blocks["scalar"]
if not df.equals(df_ref):
print(df.compare(df_ref))
assert False
# -------------------------
# multiple resulting fields
# -------------------------
f = mv.read(file_in_testdir("tuv_pl.grib"))
assert f._db is None
g = f.select(shortName=["t", "u"], level=[700, 500])
assert len(g) == 4
assert mv.grib_get(g, ["shortName", "level:l"]) == [
["t", 500],
["t", 700],
["u", 500],
["u", 700],
]
assert g._db is not None
assert len(g._db.blocks) == 1
assert "scalar" in g._db.blocks
md = [
["t", "t", "u", "u"],
[130, 130, 131, 131],
[20180801] * 4,
[1200] * 4,
[0] * 4,
[500, 700, 500, 700],
["isobaricInhPa"] * 4,
["0"] * 4,
["0001"] * 4,
["od"] * 4,
["oper"] * 4,
["an"] * 4,
[0, 1, 2, 3],
]
df_ref = build_index_db_dataframe(md, key_def=DB_COLUMNS)
df = g._db.blocks["scalar"]
if not df.equals(df_ref):
print(df.compare(df_ref))
assert False
# -------------------------
# empty result
# -------------------------
f = mv.read(file_in_testdir("tuv_pl.grib"))
g = f.select(shortName="w")
assert isinstance(g, mv.Fieldset)
assert len(g) == 0
# -------------------------
# invalid key
# -------------------------
f = mv.read(file_in_testdir("tuv_pl.grib"))
g = f.select(INVALIDKEY="w")
assert isinstance(g, mv.Fieldset)
assert len(g) == 0
# -------------------------
# mars keys
# -------------------------
f = mv.read(file_in_testdir("tuv_pl.grib"))
assert f._db is None
g = f.select(shortName=["t"], level=[500, 700], marsType="an")
assert len(g) == 2
assert mv.grib_get(g, ["shortName", "level:l", "marsType"]) == [
["t", 500, "an"],
["t", 700, "an"],
]
g = f.select(shortName=["t"], level=[500, 700], type="an")
assert len(g) == 2
assert mv.grib_get(g, ["shortName", "level:l", "type"]) == [
["t", 500, "an"],
["t", 700, "an"],
]
# check the index db contents. "type" must be mapped to the "marsType" column of the
# db so no rescanning should happen. The db should only contain the default set of columns.
assert g._db is not None
assert "scalar" in g._db.blocks
assert len(g._db.blocks) == 1
assert list(g._db.blocks["scalar"].keys())[:-1] == DB_DEFAULT_COLUMN_NAMES
g = f.select(shortName=["t"], level=[500, 700], type="fc")
assert len(g) == 0
g = f.select({"shortName": "t", "level": [500, 700], "mars.type": "an"})
assert len(g) == 2
assert mv.grib_get(g, ["shortName", "level:l", "mars.type"]) == [
["t", 500, "an"],
["t", 700, "an"],
]
# -------------------------
# custom keys
# -------------------------
f = mv.read(file_in_testdir("tuv_pl.grib"))
assert f._db is None
g = f.select(shortName=["t"], level=[500, 700], gridType="regular_ll")
assert len(g) == 2
assert mv.grib_get(g, ["shortName", "level:l", "gridType"]) == [
["t", 500, "regular_ll"],
["t", 700, "regular_ll"],
]
g = f.select({"shortName": ["t"], "level": [500, 700], "mars.param:s": "130.128"})
assert len(g) == 2
assert mv.grib_get(g, ["shortName", "level:l", "mars.param"]) == [
["t", 500, "130.128"],
["t", 700, "130.128"],
]
assert g._db is not None
assert "scalar" in g._db.blocks
assert len(g._db.blocks) == 1
assert list(g._db.blocks["scalar"].keys())[:-1] == [
*DB_DEFAULT_COLUMN_NAMES,
"gridType",
"mars.param:s",
]
# -------------------------
# wind
# -------------------------
f = mv.read(file_in_testdir("tuv_pl.grib"))
assert f._db is None
g = f.select(shortName="wind", level=700)
assert len(g) == 2
assert mv.grib_get(g, ["shortName", "level:l"]) == [
["u", 700],
["v", 700],
]
assert g._db is not None
assert len(g._db.blocks) == 1
assert "wind" in g._db.blocks
md = [
["wind"],
[131],
[20180801],
[1200],
[0],
[700],
["isobaricInhPa"],
["0"],
["0001"],
["od"],
["oper"],
["an"],
[0],
[1],
]
df_ref = build_index_db_dataframe(md, key_def=DB_COLUMNS_WIND2)
df = g._db.blocks["wind"]
if not df.equals(df_ref):
print(df.compare(df_ref))
assert False
def test_fieldset_select_date():
# date and time
f = mv.read(file_in_testdir("t_time_series.grib"))
assert f._db is None
g = f.select(date="20201221", time="12", step="9")
assert len(g) == 2
ref_keys = ["shortName", "date", "time", "step"]
ref = [
["t", "20201221", "1200", "9"],
["z", "20201221", "1200", "9"],
]
assert mv.grib_get(g, ref_keys) == ref
g = f.select(date=20201221, time="1200", step=9)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(date=20201221, time="12:00", step=9)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(date=20201221, time=12, step=9)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(date="2020-12-21", time=1200, step=9)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(
date=datetime.datetime(2020, 12, 21),
time=datetime.time(hour=12, minute=0),
step=9,
)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
# dataDate and dataTime
g = f.select(dataDate="20201221", dataTime="12", step=9)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(dataDate="2020-12-21", dataTime="12:00", step=9)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
# validityDate and validityTime
g = f.select(validityDate="20201221", validityTime="21")
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(validityDate="2020-12-21", validityTime="21:00")
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
# dateTime
g = f.select(dateTime="2020-12-21 12:00", step=9)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
# dataDateTime
g = f.select(dataDateTime="2020-12-21 12:00", step=9)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
# validityDateTime
g = f.select(validityDateTime="2020-12-21 21:00")
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
# ------------------------------------
# check multiple dates/times
# ------------------------------------
ref = [
["t", "20201221", "1200", "3"],
["t", "20201221", "1200", "9"],
["z", "20201221", "1200", "3"],
["z", "20201221", "1200", "9"],
]
# date and time
g = f.select(date="2020-12-21", time=12, step=[3, 9])
assert len(g) == 4
assert mv.grib_get(g, ref_keys) == ref
# dateTime
g = f.select(dateTime="2020-12-21 12:00", step=[3, 9])
assert len(g) == 4
assert mv.grib_get(g, ref_keys) == ref
# validityDate and validityTime
g = f.select(validityDate="2020-12-21", validityTime=[15, 21])
assert len(g) == 4
assert mv.grib_get(g, ref_keys) == ref
# validityDateTime
g = f.select(validityDateTime=["2020-12-21 15:00", "2020-12-21 21:00"])
assert len(g) == 4
assert mv.grib_get(g, ref_keys) == ref
# ------------------------------------
# check times with 1 digit hours
# ------------------------------------
# we create a new fieldset
f = mv.merge(f[0], mv.grib_set_long(f[2:4], ["time", 600]))
ref = [
["t", "20201221", "0600", "3"],
["z", "20201221", "0600", "3"],
]
g = f.select(date="20201221", time="6", step="3")
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(date=20201221, time="06", step=3)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(date=20201221, time="0600", step=3)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(date=20201221, time="06:00", step=3)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(validityDate="2020-12-21", validityTime=9)
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(validityDate="2020-12-21", validityTime="09")
assert len(g) == 2
assert mv.grib_get(g, ref_keys) == ref
g = f.select(validityDate="2020-12-21", validityTime=18)
assert len(g) == 0
def test_fieldset_select_multi_file():
f = mv.read(file_in_testdir("tuv_pl.grib"))
f.append(mv.read(file_in_testdir("ml_data.grib")))
assert f._db is None
# single resulting field
g = f.select(shortName="t", level=61)
# print(f._db.blocks)
assert len(g) == 1
assert mv.grib_get(g, ["shortName", "level:l", "typeOfLevel"]) == [
["t", 61, "hybrid"]
]
g1 = mv.read(data=f, param="t", levelist=61, levtype="ml")
d = g - g1
assert np.allclose(d.values(), np.zeros(len(d.values())))
assert g._db is not None
assert len(g._db.blocks) == 1
assert "scalar" in g._db.blocks
md = [
["t"],
[130],
[20180111],
[1200],
[12],
[61],
["hybrid"],
[None],
["0001"],
["od"],
["oper"],
["fc"],
[0],
]
df_ref = build_index_db_dataframe(md, key_def=DB_COLUMNS)
df = g._db.blocks["scalar"]
if not df.equals(df_ref):
print(df.compare(df_ref))
assert False
def test_param_info():
# no extra info
p = ParamInfo.build_from_name("2t")
assert p.name == "2t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "surface"
assert p.meta["level"] is None
p = ParamInfo.build_from_name("msl")
assert p.name == "msl"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "surface"
assert p.meta["level"] is None
p = ParamInfo.build_from_name("t500")
assert p.name == "t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 500
p = ParamInfo.build_from_name("t500hPa")
assert p.name == "t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 500
p = ParamInfo.build_from_name("t")
assert p.name == "t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == ""
assert p.meta["level"] is None
p = ParamInfo.build_from_name("t320K")
assert p.name == "t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "theta"
assert p.meta["level"] == 320
p = ParamInfo.build_from_name("t72ml")
assert p.name == "t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "hybrid"
assert p.meta["level"] == 72
p = ParamInfo.build_from_name("wind10m")
assert p.name == "wind10m"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "surface"
assert p.meta["level"] is None
p = ParamInfo.build_from_name("wind100")
assert p.name == "wind"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 100
p = ParamInfo.build_from_name("wind700")
assert p.name == "wind"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 700
p = ParamInfo.build_from_name("wind")
assert p.name == "wind"
assert p.scalar == False
assert p.meta["typeOfLevel"] == ""
assert p.meta["level"] == None
p = ParamInfo.build_from_name("wind3d")
assert p.name == "wind3d"
assert p.scalar == False
assert p.meta["typeOfLevel"] == ""
assert p.meta["level"] == None
p = ParamInfo.build_from_name("wind3d500")
assert p.name == "wind3d"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 500
    # extra info
param_level_types = {
"2t": ["surface"],
"msl": ["surface"],
"wind10m": ["surface"],
"t": ["isobaricInhPa", "theta"],
"wind": ["isobaricInhPa"],
}
p = ParamInfo.build_from_name("2t", param_level_types=param_level_types)
assert p.name == "2t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "surface"
assert p.meta["level"] == None
try:
p = ParamInfo.build_from_name("22t", param_level_types=param_level_types)
assert False
except:
pass
# p = ParamInfo.build_from_name("t2", param_level_types=param_level_types)
# assert p.name == "2t"
# assert p.level_type == "surface"
# assert p.level is None
p = ParamInfo.build_from_name("msl", param_level_types=param_level_types)
assert p.name == "msl"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "surface"
assert p.meta["level"] == None
p = ParamInfo.build_from_name("t500", param_level_types=param_level_types)
assert p.name == "t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 500
p = ParamInfo.build_from_name("t500hPa", param_level_types=param_level_types)
assert p.name == "t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 500
p = ParamInfo.build_from_name("t", param_level_types=param_level_types)
assert p.name == "t"
assert p.scalar == True
assert "typeOfLevel" not in p.meta
assert "level" not in p.meta
p = ParamInfo.build_from_name("t320K", param_level_types=param_level_types)
assert p.name == "t"
assert p.scalar == True
assert p.meta["typeOfLevel"] == "theta"
assert p.meta["level"] == 320
try:
p = ParamInfo.build_from_name("t72ml", param_level_types=param_level_types)
assert False
except:
pass
p = ParamInfo.build_from_name("wind10m", param_level_types=param_level_types)
assert p.name == "wind10m"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "surface"
assert p.meta["level"] is None
p = ParamInfo.build_from_name("wind100", param_level_types=param_level_types)
assert p.name == "wind"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 100
p = ParamInfo.build_from_name("wind700", param_level_types=param_level_types)
assert p.name == "wind"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 700
p = ParamInfo.build_from_name("wind", param_level_types=param_level_types)
assert p.name == "wind"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] is None
try:
p = ParamInfo.build_from_name("wind3d", param_level_types=param_level_types)
assert False
except:
pass
param_level_types["wind3d"] = ["isobaricInhPa"]
p = ParamInfo.build_from_name("wind3d", param_level_types=param_level_types)
assert p.name == "wind3d"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] is None
p = ParamInfo.build_from_name("wind3d500", param_level_types=param_level_types)
assert p.name == "wind3d"
assert p.scalar == False
assert p.meta["typeOfLevel"] == "isobaricInhPa"
assert p.meta["level"] == 500
def test_param_info_from_fs_single_file():
f = mv.read(file_in_testdir("tuv_pl.grib"))
g = f["u700"]
p = g.param_info
assert p.name == "u"
assert p.scalar == True
md = {
"shortName": "u",
"paramId": 131,
"date": 20180801,
"time": 1200,
"step": "0",
"level": 700,
"typeOfLevel": "isobaricInhPa",
"number": "0",
"experimentVersionNumber": "0001",
"marsClass": "od",
"marsStream": "oper",
"marsType": "an",
"_msgIndex1": 0,
}
assert md == p.meta
g = f["wind500"]
p = g.param_info
assert p.name == "wind"
assert p.scalar == False
md = {
"shortName": "wind",
"paramId": 131,
"date": 20180801,
"time": 1200,
"step": "0",
"level": 500,
"typeOfLevel": "isobaricInhPa",
"number": "0",
"experimentVersionNumber": "0001",
"marsClass": "od",
"marsStream": "oper",
"marsType": "an",
"_msgIndex1": 0,
"_msgIndex2": 1,
}
assert md == p.meta
# we lose the db
g = g + 0
p = g.param_info
assert p.name == "wind"
assert p.scalar == False
md = {
"shortName": "u",
"paramId": 131,
"date": 20180801,
"time": 1200,
"step": "0",
"level": 500,
"typeOfLevel": "isobaricInhPa",
"number": "0",
"experimentVersionNumber": "0001",
"marsClass": "od",
"marsStream": "oper",
"marsType": "an",
}
assert md == p.meta
g = f["t"]
p = g.param_info
assert p.name == "t"
assert p.scalar == True
md = {
"shortName": "t",
"paramId": 130,
"date": 20180801,
"time": 1200,
"step": "0",
"level": None,
"typeOfLevel": "isobaricInhPa",
"number": "0",
"experimentVersionNumber": "0001",
"marsClass": "od",
"marsStream": "oper",
"marsType": "an",
"_msgIndex1": 0,
}
assert md == p.meta
# we lose the db
g = g + 0
p = g.param_info
assert p.name == "t"
assert p.scalar == True
md = {
"shortName": "t",
"paramId": 130,
"date": 20180801,
"time": 1200,
"step": "0",
"level": 300,
"typeOfLevel": "isobaricInhPa",
"number": "0",
"experimentVersionNumber": "0001",
"marsClass": "od",
"marsStream": "oper",
"marsType": "an",
}
assert md == p.meta
def test_fieldset_select_operator_single_file():
f = mv.read(file_in_testdir("tuv_pl.grib"))
g = f["u700"]
assert f._db is not None
assert g._db is not None
assert len(g) == 1
assert mv.grib_get(g, ["shortName", "level:l"]) == [["u", 700]]
g1 = mv.read(data=f, param="u", levelist=700)
d = g - g1
assert np.allclose(d.values(), np.zeros(len(d.values())))
g = f["t"]
assert len(g) == 6
assert mv.grib_get(g, ["shortName", "level:l"]) == [
["t", 300],
["t", 400],
["t", 500],
["t", 700],
["t", 850],
["t", 1000],
]
try:
g = f["w"]
assert False
except:
pass
def test_fieldset_select_operator_multi_file():
f = mv.read(file_in_testdir("tuv_pl.grib"))
f.append(mv.read(file_in_testdir("ml_data.grib")))
assert f._db is None
# single resulting field
g = f["t61ml"]
assert f._db is not None
assert g._db is not None
assert len(g) == 1
assert mv.grib_get(g, ["shortName", "level:l", "typeOfLevel"]) == [
["t", 61, "hybrid"]
]
g1 = mv.read(data=f, param="t", levelist=61, levtype="ml")
d = g - g1
assert np.allclose(d.values(), np.zeros(len(d.values())))
def test_indexer_dataframe_sort_value_with_key():
md = {
"paramId": [1, 2, 1, 2, 3],
"level": [925, 850, 925, 850, 850],
"step": ["12", "110", "1", "3", "1"],
"rest": ["1", "2", "aa", "b1", "1b"],
}
md_ref = {
"paramId": [1, 1, 2, 2, 3],
"level": [925, 925, 850, 850, 850],
"step": ["1", "12", "3", "110", "1"],
"rest": ["aa", "1", "b1", "2", "1b"],
}
df = pd.DataFrame(md)
df = GribIndexer._sort_dataframe(df)
    df_ref = pd.DataFrame(md_ref)
    if not df.equals(df_ref):
        print(df.compare(df_ref))
        assert False
from common_code.common import *
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from matplotlib import tight_layout
mpl.style.use('seaborn-poster')
sns.set_palette(sns.color_palette(['#43406b', '#d15a00', '#27f77d']))
# sns.palplot(sns.color_palette(['#43406b', '#d15a00', '#27f77d']))
# sns.set_palette('cubehelix')
font = {'size' : 17}
mpl.rc('font', **font)
histcolor = '#143f7a'
conscmap = mpl.colors.LinearSegmentedColormap.from_list("", ["#142c89", "#142c89"])
def init_gridspec(nrow, ncol, nax) :
fig = plt.figure(figsize=(15, 15))
gs = gridspec.GridSpec(nrow, ncol, figure=fig)
axes = []
for i in range(nax) :
axes.append(plt.subplot(gs[i//ncol, i%ncol]))
return fig, axes
def adjust_gridspec() :
plt.subplots_adjust(wspace=0, hspace=0)
plt.tight_layout()
def show_gridspec() :
plt.show()
def set_square_aspect(axes) :
x0,x1 = axes.get_xlim()
y0,y1 = axes.get_ylim()
axes.set_aspect(abs(x1-x0)/abs(y1-y0))
def save_axis_in_file(fig, ax, dirname, filename):
ax.set_title("")
renderer = tight_layout.get_renderer(fig)
inset_tight_bbox = ax.get_tightbbox(renderer)
extent = inset_tight_bbox.transformed(fig.dpi_scale_trans.inverted())
plt.savefig(os.path.join(dirname, filename + '.png'), bbox_inches=extent, dpi=1000)
"""
renderer = tight_layout.get_renderer(fig)
inset_tight_bbox = ax.get_tightbbox(renderer)
extent = inset_tight_bbox.transformed(fig.dpi_scale_trans.inverted())
plt.savefig(os.path.join(dirname, filename + '.svg'), bbox_inches=extent)
if 'sst' not in dirname and 'readmission' not in dirname:
ax.set_xlabel(" ")
ax.set_ylabel(" ")
renderer = tight_layout.get_renderer(fig)
inset_tight_bbox = ax.get_tightbbox(renderer)
extent = inset_tight_bbox.transformed(fig.dpi_scale_trans.inverted())
plt.savefig(os.path.join(dirname, filename + '.pdf'), bbox_inches=extent)
"""
def save_table_in_file(table, dirname, filename) :
table.to_csv(os.path.join(dirname, filename + '.csv'), index=True)
def annotate(ax, xlabel=None, ylabel=None, title=None, xlim=None, ylim=None, legend='upper left') :
if xlabel is not None : ax.set_xlabel(xlabel, fontsize=20)
if ylabel is not None : ax.set_ylabel(ylabel, fontsize=20)
ax.tick_params(labelsize=20)
if title is not None : ax.set_title(title)
if xlim is not None : ax.set_xlim(*xlim)
if ylim is not None : ax.set_ylim(*ylim)
set_square_aspect(ax)
sns.despine(ax=ax)
if legend is None and ax.get_legend() is not None : ax.get_legend().remove()
if legend is not None:
        ax.legend(loc=legend, prop=dict(size=9))
###########################################################################################################################
def plot_measure_histogram_by_class(ax, spcorr, yhat, bins=30) :
sprho = np.array([x for x in spcorr])
# sppval = np.array([x[1] for x in spcorr])
measures = { "mean" : {}, "std" : {}}
# measures['pval_sig']["Overall"] = "{:.2f}".format((sppval <= 0.05).sum() / len(sppval))
measures['mean']["Overall"] = np.mean(sprho)
measures['std']["Overall"] = np.std(sprho)
unique_y = None
if len(yhat.shape) == 1 or yhat.shape[1] == 1:
yhat = yhat.flatten()
yhat = np.round(yhat)
unique_y = np.sort(np.unique(yhat))
if unique_y is not None and len(unique_y) < 4:
for y in unique_y :
rho = sprho[yhat == y]
# pval = sppval[yhat == y]
# measures['pval_sig'][str(int(y))] = "{:.2f}".format((pval <= 0.05).sum() / len(pval))
measures['mean'][str(int(y))] = np.mean(rho)
measures['std'][str(int(y))] = np.std(rho)
ax.hist(rho, bins=bins, range=(-1.0, 1.0), alpha=0.6, linewidth=0.5, edgecolor='k', weights=np.ones(len(rho))/len(rho))
else :
ax.hist(sprho, bins=bins, range=(-1.0, 1.0), alpha=0.6, linewidth=0.5, edgecolor='k', weights=np.ones(len(sprho))/len(sprho))
return pd.DataFrame(measures)
def plot_histogram_by_class(ax, spcorr, yhat, bins=30) :
sprho = np.array([x for x in spcorr])
# sppval = np.array([x[1] for x in spcorr])
measures = { "mean" : {}, "std" : {}}
# measures['pval_sig']["Overall"] = "{:.2f}".format((sppval <= 0.05).sum() / len(sppval))
measures['mean']["Overall"] = np.mean(sprho)
measures['std']["Overall"] = np.std(sprho)
unique_y = None
if len(yhat.shape) == 1 or yhat.shape[1] == 1:
yhat = yhat.flatten()
yhat = np.round(yhat)
unique_y = np.sort(np.unique(yhat))
if unique_y is not None and len(unique_y) < 4:
for y in unique_y :
rho = sprho[yhat == y]
# pval = sppval[yhat == y]
# measures['pval_sig'][str(int(y))] = "{:.2f}".format((pval <= 0.05).sum() / len(pval))
measures['mean'][str(int(y))] = np.mean(rho)
measures['std'][str(int(y))] = np.std(rho)
ax.hist(rho, bins=bins, range=(-1.0, 1.0), alpha=0.6, linewidth=0.5, edgecolor='k', weights=np.ones(len(rho))/len(rho))
else :
ax.hist(sprho, bins=bins, range=(-1.0, 1.0), alpha=0.6, linewidth=0.5, edgecolor='k', weights=np.ones(len(sprho))/len(sprho))
return pd.DataFrame(measures)
def plot_SP_histogram_by_class(ax, spcorr, yhat, bins=30) :
sprho = np.array([x[0] for x in spcorr])
sppval = np.array([x[1] for x in spcorr])
measures = {"pval_sig" : {}, "mean" : {}, "std" : {}}
measures['pval_sig']["Overall"] = "{:.2f}".format((sppval <= 0.05).sum() / len(sppval))
measures['mean']["Overall"] = np.mean(sprho)
measures['std']["Overall"] = np.std(sprho)
unique_y = None
if len(yhat.shape) == 1 or yhat.shape[1] == 1:
yhat = yhat.flatten()
yhat = np.round(yhat)
unique_y = np.sort(np.unique(yhat))
if unique_y is not None and len(unique_y) < 4:
for y in unique_y :
rho = sprho[yhat == y]
pval = sppval[yhat == y]
measures['pval_sig'][str(int(y))] = "{:.2f}".format((pval <= 0.05).sum() / len(pval))
measures['mean'][str(int(y))] = np.mean(rho)
measures['std'][str(int(y))] = np.std(rho)
ax.hist(rho, bins=bins, range=(-1.0, 1.0), alpha=0.6, linewidth=0.5, edgecolor='k', weights=np.ones(len(rho))/len(rho))
else :
ax.hist(sprho, bins=bins, range=(-1.0, 1.0), alpha=0.6, linewidth=0.5, edgecolor='k', weights=np.ones(len(sprho))/len(sprho))
return pd.DataFrame(measures)
def plot_measure_density_by_class(ax, spcorr, yhat, linestyle='-', label=None) :
sprho = np.array([x for x in spcorr])
# sppval = np.array([x[1] for x in spcorr])
measures = {"mean" : {}, "std" : {}}
unique_y = None
if len(yhat.shape) == 1 or yhat.shape[1] == 1:
yhat = yhat.flatten()
yhat = np.round(yhat)
unique_y = np.sort(np.unique(yhat))
if False : #unique_y is not None and len(unique_y) < 4:
for y in unique_y :
rho = sprho[yhat == y]
pval = sppval[yhat == y]
measures['pval_sig'][str(int(y))] = "{:.2f}".format((pval <= 0.05).sum() / len(pval))
measures['mean'][str(int(y))] = np.mean(rho)
measures['std'][str(int(y))] = np.std(rho)
sns.kdeplot(rho, linewidth=2, linestyle=linestyle, ax=ax)
else :
# measures['pval_sig']["Overall"] = "{:.2f}".format((sppval <= 0.05).sum() / len(sppval))
measures['mean']["Overall"] = np.mean(sprho)
measures['std']["Overall"] = np.std(sprho)
sns.kdeplot(sprho, linewidth=2, linestyle=linestyle, color='k', ax=ax, label=label)
    return pd.DataFrame(measures)
import numpy as np
from numpy.fft import fft, ifft
# from: http://www.mirzatrokic.ca/FILES/codes/fracdiff.py
# small modification: wrapped 2**np.ceil(...) around int()
# https://github.com/SimonOuellette35/FractionalDiff/blob/master/question2.py
_default_thresh = 1e-4
def get_weights(d, size):
"""Expanding window fraction difference weights."""
w = [1.0]
for k in range(1, size):
w_ = -w[-1] / k * (d - k + 1)
w.append(w_)
w = np.array(w[::-1]).reshape(-1, 1)
return w
import numba
@numba.njit
def get_weights_ffd(d, thres, lim=99999):
"""Fixed width window fraction difference weights.
Set lim to be large if you want to only stop at thres.
Set thres to be zero if you want to ignore it.
"""
w = [1.0]
k = 1
for i in range(1, lim):
w_ = -w[-1] / k * (d - k + 1)
if abs(w_) < thres:
break
w.append(w_)
k += 1
w = np.array(w[::-1]).reshape(-1, 1)
return w
def frac_diff_ffd(x, d, thres=_default_thresh, lim=None):
assert isinstance(x, np.ndarray)
assert x.ndim == 1
if lim is None:
lim = len(x)
w, out = _frac_diff_ffd(x, d, lim, thres=thres)
# print(f'weights is shape {w.shape}')
return out
# this method was not faster
# def frac_diff_ffd_stride_tricks(x, d, thres=_default_thresh):
# """d is any positive real"""
# assert isinstance(x, np.ndarray)
# w = get_weights_ffd(d, thres, len(x))
# width = len(w) - 1
# output = np.empty(len(x))
# output[:width] = np.nan
# output[width:] = np.dot(np.lib.stride_tricks.as_strided(x, (len(x) - width, len(w)), (x.itemsize, x.itemsize)), w[:,0])
# return output
@numba.njit
def _frac_diff_ffd(x, d, lim, thres=_default_thresh):
"""d is any positive real"""
w = get_weights_ffd(d, thres, lim)
width = len(w) - 1
output = []
output.extend([np.nan] * width) # the first few entries *were* zero, should be nan?
for i in range(width, len(x)):
output.append(np.dot(w.T, x[i - width: i + 1])[0])
return w, np.array(output)
def fast_frac_diff(x, d):
"""expanding window version using fft form"""
assert isinstance(x, np.ndarray)
T = len(x)
np2 = int(2 ** np.ceil(np.log2(2 * T - 1)))
k = np.arange(1, T)
b = (1,) + tuple(np.cumprod((k - d - 1) / k))
z = (0,) * (np2 - T)
z1 = b + z
z2 = tuple(x) + z
dx = ifft(fft(z1) * fft(z2))
return np.real(dx[0:T])
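# Illustrative usage sketch (an addition, with arbitrary example values): both
# routines fractionally difference a 1-D series; frac_diff_ffd uses a fixed,
# thresholded window and pads the first `width` entries with NaN, while
# fast_frac_diff applies the full expanding-window filter via FFT.
def _example_usage(d=0.4):
    x = np.cumsum(np.random.randn(500))   # random walk of order ~1
    y_ffd = frac_diff_ffd(x, d)           # fixed-width window, NaN head
    y_exp = fast_frac_diff(x, d)          # expanding window via FFT
    return y_ffd, y_exp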
# TESTS
def test_all():
for d in [0.3, 1, 1.5, 2, 2.5]:
test_fast_frac_diff_equals_fracDiff_original_impl(d=d)
test_frac_diff_ffd_equals_original_impl(d=d)
# test_frac_diff_ffd_equals_prado_original(d=d) # his implementation is busted for fractional d
# def test_frac_diff_ffd_equals_prado_original(d=3):
# # ignore this one for now as Prado's version does not work
# from .prado_orig import fracDiff_FFD_prado_original
# import pandas as pd
#
# x = np.random.randn(100)
# a = frac_diff_ffd(x, d, thres=_default_thresh)
# b = fracDiff_FFD_prado_original(pd.DataFrame(x), d, thres=_default_thresh)
# b = np.squeeze(b.values)
# a = a[d:] # something wrong with the frac_diff_ffd gives extra entries of zero
# assert np.allclose(a, b)
# # return locals()
def test_frac_diff_ffd_equals_original_impl(d=3):
from .prado_orig import fracDiff_FFD_original_impl
import pandas as pd
x = np.random.randn(100)
a = frac_diff_ffd(x, d, thres=_default_thresh)
b = fracDiff_FFD_original_impl(pd.DataFrame(x), d, thres=_default_thresh)
assert np.allclose(a, b)
# return locals()
def test_fast_frac_diff_equals_fracDiff_original_impl(d=3):
from .prado_orig import fracDiff_original_impl
import pandas as pd
x = np.random.randn(100)
a = fast_frac_diff(x, d)
    b = fracDiff_original_impl(pd.DataFrame(x), d, thres=_default_thresh)
    b = np.squeeze(b.values)
    assert np.allclose(a, b)
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
        with tm.assertRaises(TypeError):
            dti - dti_tz
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import keras
from keras_self_attention import SeqSelfAttention
# In[2]:
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from nltk.corpus import stopwords
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Concatenate, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
import warnings
pd.set_option("display.max_colwidth", 200)
# In[3]:
data=pd.read_csv("C:/Users/Ruba/Downloads/Authors/AuthorsMerg01.csv",nrows=100000)
# In[4]:
data.drop_duplicates(subset=['Text'],inplace=True)#dropping duplicates
data.dropna(axis=0,inplace=True)#dropping na
# In[5]:
data.info()
# In[6]:
contraction_mapping = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not",
"didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
"he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
"I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would",
"i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would",
"it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam",
"mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have",
"mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock",
"oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have",
"she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is",
"should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as",
"this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would",
"there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have",
"they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have",
"wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are",
"we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are",
"what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is",
"where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have",
"why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have",
"would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all",
"y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have",
"you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have",
"you're": "you are", "you've": "you have"}
# In[7]:
import nltk
nltk.download('stopwords')
# In[8]:
stop_words = set(stopwords.words('english'))
# In[9]:
def text_cleaner(text,num):
newString = text.lower()
newString = BeautifulSoup(newString, "lxml").text
newString = re.sub(r'\([^)]*\)', '', newString)
newString = re.sub('"','', newString)
newString = ' '.join([contraction_mapping[t] if t in contraction_mapping else t for t in newString.split(" ")])
newString = re.sub(r"'s\b","",newString)
newString = re.sub("[^a-zA-Z]", " ", newString)
newString = re.sub('[m]{2,}', 'mm', newString)
if(num==0):
tokens = [w for w in newString.split() if not w in stop_words]
else:
tokens=newString.split()
long_words=[]
for i in tokens:
if len(i)>1: #removing short word
long_words.append(i)
return (" ".join(long_words)).strip()
# In[10]:
#call the function
cleaned_text = []
for t in data['Text']:
cleaned_text.append(text_cleaner(t,0))
# In[11]:
cleaned_text[:5]
# In[12]:
#call the function
cleaned_Author = []
for t in data['Author']:
cleaned_Author.append(text_cleaner(t,1))
# In[13]:
cleaned_Author[:10]
# In[14]:
#restore values of text / Author after cleaning process
data['cleaned_text']=cleaned_text
data['cleaned_Author']=cleaned_Author
# In[15]:
#remove empty spaces and NA ones
data.replace('', np.nan, inplace=True)
data.dropna(axis=0,inplace=True)
# In[16]:
# find the length of sentences in each Text / Author column to know the min/max length ranges
import matplotlib.pyplot as plt
text_word_count = []
Author_word_count = []
# populate the lists with sentence lengths
for i in data['cleaned_text']:
text_word_count.append(len(i.split()))
for i in data['cleaned_Author']:
Author_word_count.append(len(i.split()))
length_df = pd.DataFrame({'text':text_word_count, 'Author':Author_word_count})
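# Illustrative follow-up (an assumption, not from the original notebook):
# inspect the length distribution to choose maximum padded sequence lengths.
length_df.hist(bins=30)
plt.show()
print(length_df.describe())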
from bs4 import BeautifulSoup
from crossref.restful import Works
import datetime
from habanero import Crossref
import json
import lxml
import numpy as np
import os
import pandas as pd
import shutil
import random
import re
import requests
import time
from a0001_admin import clean_dataframe
from a0001_admin import name_paths
from a0001_admin import retrieve_datetime
from a0001_admin import retrieve_format
from a0001_admin import retrieve_list
from a0001_admin import retrieve_path
from a0001_admin import write_paths
from a0001_admin import work_completed
from a0001_admin import work_to_do
def acquire_pubs():
"""
"""
# set acquire_pubs as a task
work_completed('acquire_pubs', 0)
# search for pubs
search_term()
# make json folder
make_json_folder()
# retrieve metadata
search_web()
    # retrieve metadata from Crossref
crosssearch_crossref()
# summarize
highlight()
wait_time = random.random()*60 + 60
    print('Wait: ' + str(round(wait_time, 2)) + ' seconds')
time.sleep(wait_time)
# completed acquire_pubs
    work_completed('acquire_pubs', 1)
def highlight():
"""
"""
json_src = os.path.join(retrieve_path('pub_json'))
for file in os.listdir(json_src):
json_file = open(os.path.join(json_src, file), 'r')
data = json_file.read()
json_file.close()
obj_dst = json.loads(data)
searched_list = list(obj_dst['searched'])
searched_list.append('summarize')
json_obj = {"searched": [searched_list],}
json_obj['doi'] = obj_dst['doi']
json_obj['doi_url'] = obj_dst['doi_url']
try:
json_obj['title'] = obj_dst['gscholar']['title']
except:
json_obj['title'] = None
try:
json_obj['citations'] = obj_dst['gscholar']['citations']
except:
json_obj['citations'] = 0
pub_affiliations = []
try:
authors = list(obj_dst['crossref_doi']['author'])
print('authors = ')
            print(authors)
for author in authors:
index = authors.index(author)
affiliations = list(obj_dst['crossref_doi']['author'][index]['affiliation'])
for affiliation in affiliations:
index2 = affiliations.index(affiliation)
pub_affiliation = obj_dst['crossref_doi']['author'][index]['affiliation'][index2]['name']
if pub_affiliation not in pub_affiliations:
pub_affiliations.append(pub_affiliation)
except:
pub_affiliations = []
json_obj['affiliation'] = pub_affiliations
for search in list(obj_dst['searched']):
json_obj[search] = obj_dst[search]
json_file = open(os.path.join(json_src, file), 'w')
#data_json = json.dumps(w1, indent = 4, ensure_ascii = False)
#obj_json = json.dumps(obj_json, indent = 3, ensure_ascii = False)
json_obj = json.dumps(json_obj, indent = 3)
json_file.write(json_obj)
json_file.close()
def build_doi_url(doi):
"""
"""
doi_url = 'https://doi.org/'
doi_url = doi_url + str(doi)
return(doi_url)
def check_scraped(name_dataset, term, year, num):
"""
"""
name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(name_dataset)
paths_to_check = []
paths_to_check.append(os.path.join(retrieve_path(name_src)))
paths_to_check.append(os.path.join(retrieve_path('pub_gscholar_json')))
paths_to_check.append(os.path.join(retrieve_path('pub_crossref_json')))
paths_to_check.append(os.path.join(retrieve_path('pub_web_json')))
for path in paths_to_check:
print('path =')
print(path)
for file in os.listdir(path):
# check specific gscholar search
file_split = file.split('.')
if file_split[0] == term: return(True)
# find and compare file term to term passed into the function
pattern = '[a-z]+'
flags = re.IGNORECASE
file_term = re.findall(pattern, file, flags)
file_term = file_term[0]
if file_term != term: continue
print('file_term = ' + file_term + ' term = ' + term)
# find and compare file year to year passed into the function
pattern = '[0-9]{4}'
file_year = re.findall(pattern, file)
file_year = file_year[0]
if str(file_year) != str(year): continue
print('file_year = ' + file_year + ' year = ' + str(year))
# find and compare file year to year passed into the function
pattern = '[0-9]{3}'
file_num = re.findall(pattern, file)
file_num = file_num[1]
if str(file_num) != str(num): continue
print('file_num = ' + file_num + ' num = ' + str(num))
# find and compare file saved date to current date
file = file.split(' ')
file = file[3]
pattern = '[0-9]{4}' + '-' + '[0-9]{2}' + '-' + '[0-9]{2}'
file_date_saved = re.findall(pattern, file)
file_date_saved = file_date_saved[0]
print('file_date_saved = ' + file_date_saved)
a = file_date_saved.split('-')
a = datetime.datetime(int(a[0]), int(a[1]), int(a[2]), 0, 0)
#print('a = ' + str(a))
b = datetime.datetime.today()
#print('b = ' + str(b))
v = b-a
#print('v = ' + str(v))
v = int(v.days)
#print('v = ' + str(v))
if v < 10:
#print('date match: ' + str(v))
#print('too many days lapsed since last query.')
return(True)
return(False)
def check_num(term, year, num):
"""
    Return True if this (term, year, num) query should be skipped,
    False if scraping should continue.
"""
src_path = os.path.join(retrieve_path('pub_gscholar_json'))
df_file = os.path.join(src_path, term + '.csv')
    df = pd.read_csv(df_file)
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime
from scipy import interpolate
#feature+activity1+activity2+attributenumber+extractionMethod+freqforfft
feature_combinations=["31112","31111","11121","21121","31125", "11113","3112616","311263"] #used for generalizing initial work
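# Illustrative decoding of the combination strings (inferred from the header
# comment above and the first-digit branch below; the trailing-digit meanings
# are an assumption, not verified code): digit 1 selects the sensor
# (1=orientation, 2=gyro, 3=accelerometer), digits 2-3 the two activities,
# digit 4 the attribute/axis, digit 5 the extraction method, and any trailing
# digits the FFT frequency, e.g. "31112" ~ accelerometer/x/mean and
# "3112616" ~ accelerometer FFT bin 16.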
#dataframes equivalent to feature matrices for eating and cooking with 8 features
eatingcsv = pd.DataFrame( index=['eating1','eating2','eating3','eating4'], columns=['accelerometer_mean_x','accelerometer_std_x','orientation_std_y','gyro_std_y','accelerometer_max_y','orientation_rms_x','accelerometer_fft_y_17','accelerometer_fft_y_4'])
cookingcsv = pd.DataFrame( index=['cooking1','cooking2'], columns=['accelerometer_mean_x','accelerometer_std_x','orientation_std_y','gyro_std_y','accelerometer_max_y','orientation_rms_x','accelerometer_fft_y_17','accelerometer_fft_y_4'])
# loop 4 times since eating has 4 files; during the last 2 iterations the cooking data is ignored (cooking only has 2 files), which avoids a separate loop for cooking
for k in range(4):
for j in range(len(feature_combinations)): #iterate 7 (number of features) times and do activity as described in feature_combination list
if (feature_combinations[j][0] == '1'):
# orientation
sensor = "orientation"
eatfood1 = pd.read_csv("Data/EatFood1/orientation-1533862083.csv")
eatfood2 = pd.read_csv("Data/EatFood2/orientation-1533862416.csv")
eatfood3 = pd.read_csv("Data/EatFood3/orientation-1533862913.csv")
eatfood4 = pd.read_csv("Data/EatFood4/orientation-1533864477.csv")
cooking1 = pd.read_csv("Data/Cooking1/orientation-1533863975.csv")
cooking2 = pd.read_csv("Data/Cooking2/orientation-1533864170.csv")
elif (feature_combinations[j][0] == '2'):
# gyro
sensor = "gyro"
eatfood1 = pd.read_csv("Data/EatFood1/gyro-1533862083.csv")
eatfood2 = pd.read_csv("Data/EatFood2/gyro-1533862416.csv")
eatfood3 = pd.read_csv("Data/EatFood3/gyro-1533862913.csv")
eatfood4 = pd.read_csv("Data/EatFood4/gyro-1533864477.csv")
            cooking1 = pd.read_csv("Data/Cooking1/gyro-1533863975.csv")
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.axes as ax
# creates the data for the HOAc/Ni(110) IR
colnames = ['Wavenumber', 'Intensity']
# 15 s
f1 = pd.read_csv("Ni(110) 1e-9Torr15s 210K.0.dpt", '\t', header=None, names=colnames)
f1.set_index(colnames[0], inplace=True)
f2 = pd.read_csv("Ni(110) 1e-9Torr15s 352K.0.dpt", '\t', header=None, names=colnames)
f2.set_index(colnames[0], inplace=True)
f3 = pd.read_csv("Ni(110) 1e-9Torr15s 452K.0.dpt", '\t', header=None, names=colnames)
f3.set_index(colnames[0], inplace=True)
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import zscore
from sklearn.decomposition import PCA
import pandas as pd
from itertools import combinations
# Load helper function(s) for interacting with CTF dataset
from ctf_dataset.load import create_wrapped_dataset
base_dir = '/mnt/bucket/labs/hasson/snastase/social-ctf'
data_dir = join(base_dir, 'data')
# Create wrapped CTF dataset
wrap_f = create_wrapped_dataset(data_dir, output_dataset_name="virtual.hdf5")
n_lstms = 512
n_repeats = 8
n_players = 4
map_id = 0
# Get matchups with all same agents (e.g. AA vs AA)
agent_ids = wrap_f['map/matchup/repeat/player/agent_id'][0, :, :, :, 0]
matchup_ids = np.all(agent_ids[:, 0, :] ==
agent_ids[:, 0, 0][:, np.newaxis], axis=1)
n_matchups = np.sum(matchup_ids) # 0, 34, 49, 54
# Extract LSTMs for one map and matchup
lstms_matched = np.tanh(wrap_f['map/matchup/repeat/player/time/lstm'][
map_id, matchup_ids, ...].astype(np.float32))
print("Loaded LSTMs for within-population matchups")
# Loop through matchups, repeats, and players to compute PCA
k = n_lstms
lstm_pca = {}
for m in np.arange(n_matchups):
lstm_pca[m] = {}
for r in np.arange(n_repeats):
lstm_pca[m][r] = {}
for p in np.arange(n_players):
lstm_pca[m][r][p] = {}
pca = PCA(n_components=k)
transformed = pca.fit_transform(
#zscore(lstms_matched[m, r, p], axis=0))
#np.tanh(lstms_matched[m, r, p]))
zscore(lstms_matched[m, r, p], axis=0))
lstm_pca[m][r][p]['transformed'] = transformed
lstm_pca[m][r][p]['pca'] = pca
print(f"Finished running PCA for matchup {m}, "
f"repeat {r}, player {p}")
np.save('results/pca_lstm_tanh-z_results.npy', lstm_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_pca_long = {'population': [], 'repeat': [], 'player': [],
'variance explained': [], 'dimension': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_players):
for k, v in enumerate(lstm_pca[m][r][p][
'pca'].explained_variance_ratio_):
lstm_pca_long['population'].append(pops[m])
lstm_pca_long['repeat'].append(r)
lstm_pca_long['player'].append(p)
lstm_pca_long['variance explained'].append(v)
lstm_pca_long['dimension'].append(k + 1)
lstm_pca_long = pd.DataFrame(lstm_pca_long)
max_k = 30
lstm_pca_trunc = lstm_pca_long[lstm_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pca_trunc, x='dimension',
y='variance explained', hue='repeat',
col='population', col_wrap=2,
kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_players, len(percents)))
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_players):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_pca[m][r][p][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, r, p, i] = k
for m in np.arange(n_matchups):
for i, perc in enumerate(percents):
median = int(np.median(percents_vaf[m, ..., i]))
min = int(np.amin(percents_vaf[m, ..., i]))
max = int(np.amax(percents_vaf[m, ..., i]))
print(f"Population {pops[m]}: {median} dimensions "
f"for {perc} variance (range: {min}-{max})")
print('\n')
# Stack pairs of players and compute joint PCA
pairs = list(combinations(np.arange(n_players), 2))
n_pairs = len(pairs)
k = n_lstms * 2
coop_ids, comp_ids = [0, 5], [1, 2, 3, 4]
lstm_pair_pca = {}
for m in np.arange(n_matchups):
lstm_pair_pca[m] = {}
for r in np.arange(n_repeats):
lstm_pair_pca[m][r] = {}
for p, pair in enumerate(pairs):
lstm_pair_pca[m][r][p] = {}
stack_lstm = np.hstack((lstms_matched[m, r, pair[0]],
lstms_matched[m, r, pair[1]]))
pca = PCA(n_components=k)
transformed = pca.fit_transform(
zscore(stack_lstm, axis=0))
lstm_pair_pca[m][r][p]['transformed'] = transformed
lstm_pair_pca[m][r][p]['pca'] = pca
print(f"Finished running PCA for matchup {m}, "
f"repeat {r}, pair {pair}")
np.save('results/pair-pca_lstm_tanh-z_results.npy', lstm_pair_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_pair_pca_long = {'population': [], 'repeat': [], 'pair': [],
'variance explained': [], 'dimension': [],
'type': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
pair_type = {c:('cooperative' if c in coop_ids else 'competitive')
for c in np.arange(n_pairs)}
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_pairs):
for k, v in enumerate(lstm_pair_pca[m][r][p][
'pca'].explained_variance_ratio_):
lstm_pair_pca_long['population'].append(pops[m])
lstm_pair_pca_long['repeat'].append(r)
lstm_pair_pca_long['pair'].append(p)
lstm_pair_pca_long['variance explained'].append(v)
lstm_pair_pca_long['dimension'].append(k + 1)
lstm_pair_pca_long['type'].append(pair_type[p])
lstm_pair_pca_long = pd.DataFrame(lstm_pair_pca_long)
max_k = 10
lstm_pair_pca_trunc = lstm_pair_pca_long[
lstm_pair_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pair_pca_trunc, x='dimension',
y='variance explained', hue='type',
col='population', col_wrap=2, linewidth=3,
kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_pairs, len(percents)))
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_pairs):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_pair_pca[m][r][p][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, r, p, i] = k
for m in np.arange(n_matchups):
for type, c in zip(['cooperative', 'competitive'],
[coop_ids, comp_ids]):
for i, perc in enumerate(percents):
median = int(np.median(percents_vaf[m, :, c, i]))
min = int(np.amin(percents_vaf[m, :, c, i]))
max = int(np.amax(percents_vaf[m, :, c, i]))
print(f"Population {pops[m]} {type}: {median} dimensions "
f"for {perc} variance (range: {min}-{max})")
print('\n')
# Stack across all repeats and run PCA
k = n_lstms
lstm_stack_pca = {}
for m in np.arange(n_matchups):
lstm_stack_pca[m] = {}
stack_lstm = []
for r in np.arange(n_repeats):
for p in np.arange(n_players):
stack_lstm.append(zscore(lstms_matched[m, r, p],
axis=0))
stack_lstm = np.vstack(stack_lstm)
pca = PCA(n_components=k)
transformed = pca.fit_transform(stack_lstm)
lstm_stack_pca[m]['transformed'] = transformed
lstm_stack_pca[m]['pca'] = pca
print(f"Finished running stacked PCA for matchup {m}")
np.save('results/stack-pca_lstm_tanh-z_results.npy', lstm_stack_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_stack_pca_long = {'population': [], 'variance explained': [],
'dimension': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
for m in np.arange(n_matchups):
for k, v in enumerate(lstm_stack_pca[m][
'pca'].explained_variance_ratio_):
lstm_stack_pca_long['population'].append(pops[m])
lstm_stack_pca_long['variance explained'].append(v)
lstm_stack_pca_long['dimension'].append(k + 1)
lstm_stack_pca_long = pd.DataFrame(lstm_stack_pca_long)
"""Habitat risk assessment (HRA) model for InVEST."""
# -*- coding: UTF-8 -*-
import os
import logging
import pickle
import shutil
import tempfile
import numpy
from osgeo import gdal, ogr, osr
import pandas
import shapely.ops
import shapely.wkb
import taskgraph
import pygeoprocessing
from . import utils
from . import validation
LOGGER = logging.getLogger('natcap.invest.hra')
# Parameters from the user-provided criteria and info tables
_BUFFER_HEADER = 'STRESSOR BUFFER (METERS)'
_CRITERIA_TYPE_HEADER = 'CRITERIA TYPE'
_HABITAT_NAME_HEADER = 'HABITAT NAME'
_HABITAT_RESILIENCE_HEADER = 'HABITAT RESILIENCE ATTRIBUTES'
_HABITAT_STRESSOR_OVERLAP_HEADER = 'HABITAT STRESSOR OVERLAP PROPERTIES'
_SPATIAL_CRITERIA_TYPE = 'spatial_criteria'
_HABITAT_TYPE = 'habitat'
_STRESSOR_TYPE = 'stressor'
_SUBREGION_FIELD_NAME = 'name'
_WEIGHT_KEY = 'Weight'
_DQ_KEY = 'DQ'
# Parameters to be used in dataframe and output stats CSV
_HABITAT_HEADER = 'HABITAT'
_STRESSOR_HEADER = 'STRESSOR'
_TOTAL_REGION_NAME = 'Total Region'
# Parameters for the spatially explicit criteria shapefiles
_RATING_FIELD = 'rating'
# A cutoff for the decay amount after which we will say scores are equivalent
# to 0, since we don't want to have values outside the buffer zone.
_EXP_DEDAY_CUTOFF = 1E-6
# Target cell type or values for raster files.
_TARGET_PIXEL_FLT = gdal.GDT_Float32
_TARGET_PIXEL_INT = gdal.GDT_Byte
_TARGET_NODATA_FLT = float(numpy.finfo(numpy.float32).min)
_TARGET_NODATA_INT = 255 # for unsigned 8-bit int
# ESPG code for warping rasters to WGS84 coordinate system.
_WGS84_ESPG_CODE = 4326
# Resampling method for rasters.
_RESAMPLE_METHOD = 'near'
# An argument list that will be passed to the GTiff driver. Useful for
# blocksizes, compression, and more.
_DEFAULT_GTIFF_CREATION_OPTIONS = (
'TILED=YES', 'BIGTIFF=YES', 'COMPRESS=DEFLATE',
'BLOCKXSIZE=256', 'BLOCKYSIZE=256')
ARGS_SPEC = {
"model_name": "Habitat Risk Assessment",
"module": __name__,
"userguide_html": "habitat_risk_assessment.html",
"args": {
"workspace_dir": validation.WORKSPACE_SPEC,
"results_suffix": validation.SUFFIX_SPEC,
"n_workers": validation.N_WORKERS_SPEC,
"info_table_path": {
"name": "Habitat Stressor Information CSV or Excel File",
"about": (
"A CSV or Excel file that contains the name of the habitat "
"(H) or stressor (s) on the `NAME` column that matches the "
"names in `criteria_table_path`. Each H/S has its "
"corresponding vector or raster path on the `PATH` column. "
"The `STRESSOR BUFFER (meters)` column should have a buffer "
"value if the `TYPE` column is a stressor."),
"type": "csv",
"required": True,
"validation_options": {
"required_fields": ["NAME", "PATH", "TYPE", _BUFFER_HEADER],
"excel_ok": True,
}
},
"criteria_table_path": {
"name": "Criteria Scores Table",
"about": (
"A CSV or Excel file that contains the set of criteria "
"ranking (rating, DQ and weight) of each stressor on each "
"habitat, as well as the habitat resilience attributes."),
"type": "csv",
"validation_options": {
"excel_ok": True,
},
"required": True,
},
"resolution": {
"name": "Resolution of Analysis (meters)",
"about": (
"The size that should be used to grid the given habitat and "
"stressor files into rasters. This value will be the pixel "
"size of the completed raster files."),
"type": "number",
"required": True,
"validation_options": {
"expression": "value > 0",
}
},
"max_rating": {
"name": "Maximum Criteria Score",
"about": (
"This is the highest score that is used to rate a criteria "
"within this model run. This value would be used to compare "
"with the values within Rating column of the Criteria Scores "
"table."),
"type": "number",
"required": True,
"validation_options": {
"expression": "value > 0",
}
},
"risk_eq": {
"name": "Risk Equation",
"about": (
"Each of these represents an option of a risk calculation "
"equation. This will determine the numeric output of risk "
"for every habitat and stressor overlap area."),
"type": "option_string",
"required": True,
"validation_options": {
"options": ["Multiplicative", "Euclidean"],
}
},
"decay_eq": {
"name": "Decay Equation",
"about": (
"Each of these represents an option of a decay equation "
"for the buffered stressors. If stressor buffering is "
"desired, this equation will determine the rate at which "
"stressor data is reduced."),
"type": "option_string",
"required": True,
"validation_options": {
"options": ["None", "Linear", "Exponential"],
}
},
"aoi_vector_path": {
"name": "Area of Interest",
"about": (
"A GDAL-supported vector file containing feature containing "
"one or more planning regions. subregions. An optional field "
"called `name` could be added to compute average risk values "
"within each subregion."),
"type": "vector",
"required": True,
"validation_options": {
"projected": True,
"projection_units": "m",
}
},
"visualize_outputs": {
"name": "Generate GeoJSONs for Web Visualization",
"help": (
"Check to enable the generation of GeoJSON outputs. This "
"could be used to visualize the risk scores on a map in the "
"HRA visualization web application."),
"type": "boolean",
"required": True,
}
}
}
def execute(args):
"""Habitat Risk Assessment.
Args:
args['workspace_dir'] (str): a path to the output workspace folder.
It will overwrite any files that exist if the path already exists.
args['results_suffix'] (str): a string appended to each output file
path. (optional)
args['info_table_path'] (str): a path to the CSV or Excel file that
contains the name of the habitat (H) or stressor (s) on the
``NAME`` column that matches the names in criteria_table_path.
Each H/S has its corresponding vector or raster path on the
``PATH`` column. The ``STRESSOR BUFFER (meters)`` column should
have a buffer value if the ``TYPE`` column is a stressor.
args['criteria_table_path'] (str): a path to the CSV or Excel file that
contains the set of criteria ranking of each stressor on each
habitat.
args['resolution'] (int): a number representing the desired pixel
dimensions of output rasters in meters.
args['max_rating'] (str, int or float): a number representing the
highest potential value that should be represented in rating in the
criteria scores table.
args['risk_eq'] (str): a string identifying the equation that should be
used in calculating risk scores for each H-S overlap cell. This
will be either 'Euclidean' or 'Multiplicative'.
args['decay_eq'] (str): a string identifying the equation that should
be used in calculating the decay of stressor buffer influence. This
can be 'None', 'Linear', or 'Exponential'.
args['aoi_vector_path'] (str): a path to the shapefile containing one
or more planning regions used to get the average risk value for
each habitat-stressor combination over each area. Optionally, if
            each of the shapefile features contains a 'name' field, it will
be used as a way of identifying each individual shape.
args['n_workers'] (int): the number of worker processes to
use for processing this model. If omitted, computation will take
place in the current process. (optional)
args['visualize_outputs'] (bool): if True, create output GeoJSONs and
save them in a visualization_outputs folder, so users can visualize
results on the web app. Default to True if not specified.
(optional)
Returns:
None.
"""
LOGGER.info('Validating arguments')
invalid_parameters = validate(args)
if invalid_parameters:
raise ValueError("Invalid parameters passed: %s" % invalid_parameters)
# Validate and store inputs
LOGGER.info('Validating criteria table file and return cleaned dataframe.')
criteria_df = _get_criteria_dataframe(args['criteria_table_path'])
# Create initial working directories and determine file suffixes
intermediate_dir = os.path.join(
args['workspace_dir'], 'intermediate_outputs')
file_preprocessing_dir = os.path.join(
intermediate_dir, 'file_preprocessing')
output_dir = os.path.join(args['workspace_dir'], 'outputs')
work_dirs = [output_dir, intermediate_dir, file_preprocessing_dir]
# Add visualization_outputs folder if in an electron-Node.js based UI
if args['visualize_outputs']:
viz_dir = os.path.join(args['workspace_dir'], 'visualization_outputs')
work_dirs.append(viz_dir)
utils.make_directories(work_dirs)
file_suffix = utils.make_suffix_string(args, 'results_suffix')
# Initialize a TaskGraph
taskgraph_working_dir = os.path.join(
intermediate_dir, '_taskgraph_working_dir')
try:
n_workers = int(args['n_workers'])
except (KeyError, ValueError, TypeError):
# KeyError when n_workers is not present in args
# ValueError when n_workers is an empty string.
# TypeError when n_workers is None.
n_workers = -1 # single process mode.
task_graph = taskgraph.TaskGraph(taskgraph_working_dir, n_workers)
# Calculate recovery for each habitat, and overlap scores for each
# habitat-stressor, and store data in the dataframes
info_df, habitat_names, stressor_names = _get_info_dataframe(
args['info_table_path'], file_preprocessing_dir, intermediate_dir,
output_dir, file_suffix)
resilience_attributes, stressor_attributes = \
_get_attributes_from_df(criteria_df, habitat_names, stressor_names)
max_rating = float(args['max_rating'])
recovery_df = _get_recovery_dataframe(
criteria_df, habitat_names, resilience_attributes, max_rating,
file_preprocessing_dir, intermediate_dir, file_suffix)
overlap_df = _get_overlap_dataframe(
criteria_df, habitat_names, stressor_attributes, max_rating,
file_preprocessing_dir, intermediate_dir, file_suffix)
# Append spatially explicit criteria rasters to info_df
criteria_file_dir = os.path.dirname(args['criteria_table_path'])
info_df = _append_spatial_raster_row(
info_df, recovery_df, overlap_df, criteria_file_dir,
file_preprocessing_dir, file_suffix)
# Get target projection from the AOI vector file
if 'aoi_vector_path' in args and args['aoi_vector_path'] != '':
target_sr_wkt = pygeoprocessing.get_vector_info(
args['aoi_vector_path'])['projection_wkt']
target_sr = osr.SpatialReference()
if target_sr_wkt:
target_sr.ImportFromWkt(target_sr_wkt)
if not target_sr.IsProjected():
raise ValueError(
'The AOI vector file %s is provided but not projected.' %
args['aoi_vector_path'])
else:
# Get the value to multiply by linear distances in order to
# transform them to meters
linear_unit = target_sr.GetLinearUnits()
LOGGER.info(
'Target projection from AOI: %s. EPSG: %s. Linear unit: '
'%s.' % (target_sr.GetAttrValue('PROJECTION'),
target_sr.GetAttrValue("AUTHORITY", 1), linear_unit))
# Rasterize habitat and stressor layers if they are vectors.
# Divide resolution (meters) by linear unit to convert to projection units
target_pixel_size = (float(args['resolution'])/linear_unit,
-float(args['resolution'])/linear_unit)
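    # Illustrative example: a 500 m resolution in a projection whose linear
    # unit is 1.0 (meters) yields a target pixel size of (500.0, -500.0); the
    # negative y-size follows the GDAL convention for north-up rasters.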
# Simplify the AOI vector for faster run on zonal statistics
simplified_aoi_vector_path = os.path.join(
file_preprocessing_dir, 'simplified_aoi%s.gpkg' % file_suffix)
aoi_tolerance = (float(args['resolution']) / linear_unit) / 2
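    # A tolerance of half a pixel keeps the simplified geometries within
    # roughly one cell of the originals at the chosen analysis resolution.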
# Check if subregion field exists in the AOI vector
subregion_field_exists = _has_field_name(
args['aoi_vector_path'], _SUBREGION_FIELD_NAME)
# Simplify the AOI and preserve the subregion field if it exists
aoi_preserved_field = None
aoi_field_name = None
if subregion_field_exists:
aoi_preserved_field = (_SUBREGION_FIELD_NAME, ogr.OFTString)
aoi_field_name = _SUBREGION_FIELD_NAME
LOGGER.info('Simplifying AOI vector while preserving field %s.' %
aoi_field_name)
else:
LOGGER.info('Simplifying AOI vector without subregion field.')
simplify_aoi_task = task_graph.add_task(
func=_simplify_geometry,
args=(args['aoi_vector_path'], aoi_tolerance,
simplified_aoi_vector_path),
kwargs={'preserved_field': aoi_preserved_field},
target_path_list=[simplified_aoi_vector_path],
task_name='simplify_aoi_vector')
# Use the simplified AOI vector to run analyses
aoi_vector_path = simplified_aoi_vector_path
# Rasterize AOI vector for later risk statistics calculation
LOGGER.info('Rasterizing AOI vector.')
rasterized_aoi_pickle_path = os.path.join(
file_preprocessing_dir, 'rasterized_aoi_dictionary%s.pickle' %
file_suffix)
rasterize_aoi_dependent_tasks = [simplify_aoi_task]
# Rasterize AOI vector geometries. If field name doesn't exist, rasterize
# the entire vector onto a raster with values of 1
if aoi_field_name is None:
# Fill the raster with 1s on where a vector geometry touches any pixel
# on the raster
target_raster_path = os.path.join(
file_preprocessing_dir, 'rasterized_simplified_aoi%s.tif' %
file_suffix)
create_raster_task = task_graph.add_task(
func=pygeoprocessing.create_raster_from_vector_extents,
args=(aoi_vector_path, target_raster_path,
target_pixel_size, _TARGET_PIXEL_INT, _TARGET_NODATA_INT),
target_path_list=[target_raster_path],
task_name='rasterize_single_AOI_vector',
dependent_task_list=rasterize_aoi_dependent_tasks)
rasterize_aoi_dependent_tasks.append(create_raster_task)
# Fill the raster with 1s on where a vector geometry exists
rasterize_kwargs = {'burn_values': [1],
'option_list': ["ALL_TOUCHED=TRUE"]}
task_graph.add_task(
func=pygeoprocessing.rasterize,
args=(aoi_vector_path, target_raster_path),
kwargs=rasterize_kwargs,
target_path_list=[target_raster_path],
task_name='rasterize_single_vector',
dependent_task_list=rasterize_aoi_dependent_tasks)
pickle.dump(
{_TOTAL_REGION_NAME: target_raster_path},
open(rasterized_aoi_pickle_path, 'wb'))
    # If the field name exists, rasterize AOI geometries with the same field value
# onto separate rasters
else:
geom_pickle_path = os.path.join(
file_preprocessing_dir, 'aoi_geometries%s.pickle' % file_suffix)
get_vector_geoms_task = task_graph.add_task(
func=_get_vector_geometries_by_field,
args=(aoi_vector_path, aoi_field_name, geom_pickle_path),
target_path_list=[geom_pickle_path],
task_name='get_AOI_vector_geoms_by_field_"%s"' % aoi_field_name,
dependent_task_list=rasterize_aoi_dependent_tasks)
rasterize_aoi_dependent_tasks.append(get_vector_geoms_task)
task_graph.add_task(
func=_create_rasters_from_geometries,
args=(geom_pickle_path, file_preprocessing_dir,
rasterized_aoi_pickle_path, target_pixel_size),
target_path_list=[rasterized_aoi_pickle_path],
task_name='create_rasters_from_AOI_geometries',
dependent_task_list=rasterize_aoi_dependent_tasks)
# Create a raster from vector extent with 0's, then burn the vector
# onto the raster with 1's, for all the H/S layers that are not a raster
align_and_resize_dependency_list = []
for _, row in info_df.iterrows():
if not row['IS_RASTER']:
vector_name = row['NAME']
vector_type = row['TYPE']
vector_path = row['PATH']
simplified_vector_path = row['SIMPLE_VECTOR_PATH']
tolerance = (float(args['resolution']) / row['LINEAR_UNIT']) / 2
target_raster_path = row['BASE_RASTER_PATH']
LOGGER.info('Rasterizing %s vector.' % vector_name)
# Simplify the vector geometry first, with a tolerance of half the
# target resolution
simplify_geometry_task = task_graph.add_task(
func=_simplify_geometry,
args=(vector_path, tolerance, simplified_vector_path),
kwargs={'preserved_field': (_RATING_FIELD, ogr.OFTReal)},
target_path_list=[simplified_vector_path],
task_name='simplify_%s_vector' % vector_name)
rasterize_kwargs = {'burn_values': None, 'option_list': None}
if vector_type == _SPATIAL_CRITERIA_TYPE:
# Fill value for the target raster should be nodata float,
# since criteria rating could be float
fill_value = _TARGET_NODATA_FLT
# If it's a spatial criteria vector, burn the values from the
# ``rating`` attribute
rasterize_kwargs['option_list'] = [
"ATTRIBUTE=" + _RATING_FIELD]
rasterize_nodata = _TARGET_NODATA_FLT
rasterize_pixel_type = _TARGET_PIXEL_FLT
else: # Could be a habitat or stressor vector
# Initial fill values for the target raster should be nodata
# int
fill_value = _TARGET_NODATA_INT
# Fill the raster with 1s on where a vector geometry exists
rasterize_kwargs['burn_values'] = [1]
rasterize_kwargs['option_list'] = ["ALL_TOUCHED=TRUE"]
rasterize_nodata = _TARGET_NODATA_INT
rasterize_pixel_type = _TARGET_PIXEL_INT
align_and_resize_dependency_list.append(task_graph.add_task(
func=_create_raster_and_rasterize_vector,
args=(simplified_vector_path, target_raster_path,
target_pixel_size, rasterize_pixel_type,
rasterize_nodata, fill_value, rasterize_kwargs),
target_path_list=[target_raster_path],
task_name='rasterize_%s' % vector_name,
dependent_task_list=[simplify_geometry_task]))
# Align and resize all the rasters, including rasters provided by the user,
# and rasters created from the vectors.
base_raster_list = info_df.BASE_RASTER_PATH.tolist()
align_raster_list = info_df.ALIGN_RASTER_PATH.tolist()
LOGGER.info('Starting align_and_resize_raster_task.')
align_and_resize_rasters_task = task_graph.add_task(
func=pygeoprocessing.align_and_resize_raster_stack,
args=(base_raster_list, align_raster_list,
[_RESAMPLE_METHOD] * len(base_raster_list),
target_pixel_size, 'union'),
kwargs={'target_projection_wkt': target_sr_wkt},
target_path_list=align_raster_list,
task_name='align_and_resize_raster_task',
dependent_task_list=align_and_resize_dependency_list)
# Make buffer stressors based on their impact distance and decay function
align_stressor_raster_list = info_df[
info_df.TYPE == _STRESSOR_TYPE].ALIGN_RASTER_PATH.tolist()
dist_stressor_raster_list = info_df[
info_df.TYPE == _STRESSOR_TYPE].DIST_RASTER_PATH.tolist()
stressor_names = info_df[info_df.TYPE == _STRESSOR_TYPE].NAME.tolist()
LOGGER.info('Calculating euclidean distance transform on stressors.')
# Convert pixel size from meters to projection unit
sampling_distance = (float(args['resolution'])/linear_unit,
float(args['resolution'])/linear_unit)
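    # distance_transform_edt interprets sampling_distance as the (x, y) pixel
    # length in projection units, so the distances it produces are directly
    # comparable to the stressor buffers after they are converted below.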
distance_transform_tasks = []
for (align_raster_path, dist_raster_path, stressor_name) in zip(
align_stressor_raster_list, dist_stressor_raster_list,
stressor_names):
distance_transform_task = task_graph.add_task(
func=pygeoprocessing.distance_transform_edt,
args=((align_raster_path, 1), dist_raster_path),
kwargs={'sampling_distance': sampling_distance,
'working_dir': intermediate_dir},
target_path_list=[dist_raster_path],
task_name='distance_transform_on_%s' % stressor_name,
dependent_task_list=[align_and_resize_rasters_task])
distance_transform_tasks.append(distance_transform_task)
LOGGER.info('Calculating number of habitats on each pixel.')
align_habitat_raster_list = info_df[
info_df.TYPE == _HABITAT_TYPE].ALIGN_RASTER_PATH.tolist()
habitat_path_band_list = [
(raster_path, 1) for raster_path in align_habitat_raster_list]
habitat_count_raster_path = os.path.join(
file_preprocessing_dir, 'habitat_count%s.tif' % file_suffix)
count_habitat_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(habitat_path_band_list, _count_habitats_op,
habitat_count_raster_path, gdal.GDT_Byte, _TARGET_NODATA_INT),
target_path_list=[habitat_count_raster_path],
task_name='counting_habitats',
dependent_task_list=[align_and_resize_rasters_task])
# A dependent task list for calculating ecosystem risk from all habitat
# risk rasters
ecosystem_risk_dependent_tasks = [count_habitat_task]
# For each habitat, calculate the individual and cumulative exposure,
# consequence, and risk scores from each stressor.
for habitat in habitat_names:
LOGGER.info('Calculating recovery scores on habitat %s.' % habitat)
# Get a dataframe with information on raster paths for the habitat.
habitat_info_df = info_df.loc[info_df.NAME == habitat]
        # On a habitat raster, a pixel value of 1 indicates the existence of
        # habitat, whereas 0 or nodata means non-existence.
habitat_raster_path = habitat_info_df['ALIGN_RASTER_PATH'].item()
habitat_recovery_df = recovery_df.loc[habitat]
recovery_raster_path = habitat_recovery_df['R_RASTER_PATH']
recovery_num_raster_path = habitat_recovery_df[
'R_NUM_RASTER_PATH']
habitat_recovery_denom = habitat_recovery_df['R_DENOM']
calc_habitat_recovery_task_list = []
calc_habitat_recovery_task_list.append(
task_graph.add_task(
func=_calc_habitat_recovery,
args=(habitat_raster_path, habitat_recovery_df, max_rating),
target_path_list=[
recovery_raster_path, recovery_num_raster_path],
task_name='calculate_%s_recovery' % habitat,
dependent_task_list=[align_and_resize_rasters_task]))
total_expo_dependent_tasks = []
total_conseq_dependent_tasks = []
total_risk_dependent_tasks = []
# Calculate exposure/consequence scores on each stressor-habitat pair
for (distance_transform_task, stressor) in zip(
distance_transform_tasks, stressor_names):
LOGGER.info('Calculating exposure, consequence, and risk scores '
'from stressor %s to habitat %s.' %
(stressor, habitat))
# Get a dataframe with information on distance raster path,
# buffer distance, and linear unit for the stressor
stressor_info_df = info_df.loc[info_df.NAME == stressor]
# Get habitat-stressor overlap dataframe with information on
# numerator, denominator, spatially explicit criteria files, and
# target paths
habitat_stressor_overlap_df = overlap_df.loc[(habitat, stressor)]
stressor_dist_raster_path = stressor_info_df[
'DIST_RASTER_PATH'].item()
# Convert stressor buffer from meters to projection unit
stressor_buffer = stressor_info_df[_BUFFER_HEADER].item() / float(
stressor_info_df['LINEAR_UNIT'].item())
# Calculate exposure scores on each habitat-stressor pair
pair_expo_target_path_list = [
habitat_stressor_overlap_df.loc[raster_path] for
raster_path in ['E_NUM_RASTER_PATH', 'E_RASTER_PATH']]
pair_expo_task = task_graph.add_task(
func=_calc_pair_criteria_score,
args=(habitat_stressor_overlap_df, habitat_raster_path,
stressor_dist_raster_path, stressor_buffer,
args['decay_eq'], 'E'),
target_path_list=pair_expo_target_path_list,
task_name='calculate_%s_%s_exposure' % (habitat, stressor),
dependent_task_list=[
align_and_resize_rasters_task, distance_transform_task])
total_expo_dependent_tasks.append(pair_expo_task)
# Calculate consequence scores on each habitat-stressor pair.
# Add recovery numerator and denominator to the scores
pair_conseq_target_path_list = [
habitat_stressor_overlap_df.loc[raster_path] for
raster_path in ['C_NUM_RASTER_PATH', 'C_RASTER_PATH']]
pair_conseq_task = task_graph.add_task(
func=_calc_pair_criteria_score,
args=(habitat_stressor_overlap_df, habitat_raster_path,
stressor_dist_raster_path, stressor_buffer,
args['decay_eq'], 'C'),
kwargs={'recov_params':
(recovery_num_raster_path, habitat_recovery_denom)},
target_path_list=pair_conseq_target_path_list,
task_name='calculate_%s_%s_consequence' % (habitat, stressor),
dependent_task_list=[
align_and_resize_rasters_task,
distance_transform_task] + calc_habitat_recovery_task_list)
total_conseq_dependent_tasks.append(pair_conseq_task)
# Calculate pairwise habitat-stressor risks.
pair_e_raster_path, pair_c_raster_path, \
target_pair_risk_raster_path = [
habitat_stressor_overlap_df.loc[path] for path in
['E_RASTER_PATH', 'C_RASTER_PATH',
'PAIR_RISK_RASTER_PATH']]
pair_risk_calculation_list = [
(pair_e_raster_path, 1), (pair_c_raster_path, 1),
                (max_rating, 'raw'), (args['risk_eq'], 'raw')]
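            # In pygeoprocessing.raster_calculator, (path, band) tuples are
            # read block by block while (value, 'raw') entries are passed to
            # the local operation unchanged, so max_rating and the risk
            # equation name reach _pair_risk_op as plain scalars.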
pair_risk_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(pair_risk_calculation_list, _pair_risk_op,
target_pair_risk_raster_path, _TARGET_PIXEL_FLT,
_TARGET_NODATA_FLT),
target_path_list=[target_pair_risk_raster_path],
task_name='calculate_%s_%s_risk' % (habitat, stressor),
dependent_task_list=[pair_expo_task, pair_conseq_task])
total_risk_dependent_tasks.append(pair_risk_task)
# Calculate cumulative E, C & risk scores on each habitat
total_e_habitat_path = habitat_info_df['TOT_E_RASTER_PATH'].item()
total_c_habitat_path = habitat_info_df['TOT_C_RASTER_PATH'].item()
LOGGER.info(
'Calculating total exposure scores on habitat %s.' % habitat)
habitat_overlap_df = overlap_df.loc[habitat]
e_num_path_const_list = [
(path, 1) for path in
habitat_overlap_df['E_NUM_RASTER_PATH'].tolist()]
e_denom_list = [
(denom, 'raw') for denom in habitat_overlap_df['E_DENOM'].tolist()]
total_e_path_band_list = list(
[(habitat_raster_path, 1)] + e_num_path_const_list + e_denom_list)
# Calculate total exposure on the habitat
task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(total_e_path_band_list,
_total_exposure_op,
total_e_habitat_path,
_TARGET_PIXEL_FLT,
_TARGET_NODATA_FLT),
target_path_list=[total_e_habitat_path],
task_name='calculate_total_exposure_%s' % habitat,
dependent_task_list=total_expo_dependent_tasks)
LOGGER.info(
'Calculating total consequence scores on habitat %s.' % habitat)
recov_num_raster_path = habitat_recovery_df['R_NUM_RASTER_PATH']
c_num_path_const_list = [(path, 1) for path in habitat_overlap_df[
'C_NUM_RASTER_PATH'].tolist()]
c_denom_list = [(denom, 'raw') for denom in habitat_overlap_df[
'C_DENOM'].tolist()]
total_c_path_const_list = list(
[(habitat_raster_path, 1), (recov_num_raster_path, 1),
(habitat_recovery_denom, 'raw')] +
c_num_path_const_list + c_denom_list)
# Calculate total consequence on the habitat
task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(total_c_path_const_list,
_total_consequence_op,
total_c_habitat_path,
_TARGET_PIXEL_FLT,
_TARGET_NODATA_FLT),
target_path_list=[total_c_habitat_path],
task_name='calculate_total_consequence_%s' % habitat,
dependent_task_list=total_conseq_dependent_tasks)
LOGGER.info('Calculating total risk score and reclassified risk scores'
' on habitat %s.' % habitat)
total_habitat_risk_path, reclass_habitat_risk_path = [
habitat_info_df[column_header].item() for column_header in [
'TOT_RISK_RASTER_PATH', 'RECLASS_RISK_RASTER_PATH']]
# Get a list of habitat path and individual risk paths on that habitat
# for the final risk calculation
total_risk_path_band_list = [(habitat_raster_path, 1)]
pair_risk_path_list = habitat_overlap_df[
'PAIR_RISK_RASTER_PATH'].tolist()
total_risk_path_band_list = total_risk_path_band_list + [
(path, 1) for path in pair_risk_path_list]
# Calculate the cumulative risk on the habitat from all stressors
calc_risk_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(total_risk_path_band_list, _tot_risk_op,
total_habitat_risk_path, _TARGET_PIXEL_FLT,
_TARGET_NODATA_FLT),
target_path_list=[total_habitat_risk_path],
task_name='calculate_%s_risk' % habitat,
dependent_task_list=total_risk_dependent_tasks)
ecosystem_risk_dependent_tasks.append(calc_risk_task)
# Reclassify the risk score into three categories by dividing the total
# risk score by 3, and return the ceiling
task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([(total_habitat_risk_path, 1), (max_rating, 'raw')],
_reclassify_risk_op, reclass_habitat_risk_path,
_TARGET_PIXEL_INT, _TARGET_NODATA_INT),
target_path_list=[reclass_habitat_risk_path],
task_name='reclassify_%s_risk' % habitat,
dependent_task_list=[calc_risk_task])
# Calculate ecosystem risk scores. This task depends on every task above,
# so join the graph first.
LOGGER.info('Calculating average and reclassified ecosystem risks.')
# Create input list for calculating average & reclassified ecosystem risks
ecosystem_risk_raster_path = os.path.join(
output_dir, 'TOTAL_RISK_Ecosystem%s.tif' % file_suffix)
reclass_ecosystem_risk_raster_path = os.path.join(
output_dir, 'RECLASS_RISK_Ecosystem%s.tif' % file_suffix)
# Append individual habitat risk rasters to the input list
hab_risk_raster_path_list = info_df.loc[info_df.TYPE == _HABITAT_TYPE][
'TOT_RISK_RASTER_PATH'].tolist()
hab_risk_path_band_list = [(habitat_count_raster_path, 1)]
for hab_risk_raster_path in hab_risk_raster_path_list:
hab_risk_path_band_list.append((hab_risk_raster_path, 1))
# Calculate average ecosystem risk
ecosystem_risk_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(hab_risk_path_band_list, _ecosystem_risk_op,
ecosystem_risk_raster_path, _TARGET_PIXEL_FLT,
_TARGET_NODATA_FLT),
target_path_list=[ecosystem_risk_raster_path],
task_name='calculate_average_ecosystem_risk',
dependent_task_list=ecosystem_risk_dependent_tasks)
# Calculate reclassified ecosystem risk
task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([(ecosystem_risk_raster_path, 1), (max_rating, 'raw')],
_reclassify_ecosystem_risk_op,
reclass_ecosystem_risk_raster_path,
_TARGET_PIXEL_INT, _TARGET_NODATA_INT),
target_path_list=[reclass_ecosystem_risk_raster_path],
task_name='reclassify_ecosystem_risk',
dependent_task_list=[ecosystem_risk_task])
# Calculate the mean criteria scores on the habitat pixels within the
# polygons in the AOI vector
LOGGER.info('Calculating zonal statistics.')
# Join here because zonal_rasters needs to be loaded from the pickle file
task_graph.join()
zonal_rasters = pickle.load(open(rasterized_aoi_pickle_path, 'rb'))
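    # zonal_rasters maps each subregion name (or 'Total Region' when no
    # subregion field exists) to the path of its rasterized AOI mask.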
region_list = list(zonal_rasters)
# Dependent task list used when converting all the calculated stats to CSV
zonal_stats_dependent_tasks = []
# Filter habitat rows from the information dataframe
habitats_info_df = info_df.loc[info_df.TYPE == _HABITAT_TYPE]
# Calculate and pickle zonal stats to files
for region_name, zonal_raster_path in zonal_rasters.items():
# Compute zonal E and C stats on each habitat-stressor pair
for hab_str_idx, row in overlap_df.iterrows():
# Get habitat-stressor name without extension
habitat_stressor = '_'.join(hab_str_idx)
LOGGER.info('Calculating zonal stats of %s in %s.' %
(habitat_stressor, region_name))
# Compute pairwise E/C zonal stats
for criteria_type in ['E', 'C']:
criteria_raster_path = row[criteria_type + '_RASTER_PATH']
# Append _[region] suffix to the generic pickle file path
target_pickle_stats_path = row[
criteria_type + '_PICKLE_STATS_PATH'].replace(
'.pickle', region_name + '.pickle')
zonal_stats_dependent_tasks.append(task_graph.add_task(
func=_calc_and_pickle_zonal_stats,
args=(criteria_raster_path, zonal_raster_path,
target_pickle_stats_path, file_preprocessing_dir),
target_path_list=[target_pickle_stats_path],
task_name='calc_%s_%s_stats_in_%s' % (
habitat_stressor, criteria_type, region_name)))
# Compute pairwise risk zonal stats
pair_risk_raster_path = row['PAIR_RISK_RASTER_PATH']
target_pickle_stats_path = row[
'PAIR_RISK_PICKLE_STATS_PATH'].replace(
'.pickle', region_name + '.pickle')
zonal_stats_dependent_tasks.append(task_graph.add_task(
func=_calc_and_pickle_zonal_stats,
args=(pair_risk_raster_path, zonal_raster_path,
target_pickle_stats_path, file_preprocessing_dir),
kwargs={'max_rating': max_rating},
target_path_list=[target_pickle_stats_path],
task_name='calc_%s_risk_stats_in_%s' % (
habitat_stressor, region_name)))
# Calculate the overall stats of exposure, consequence, and risk for
# each habitat from all stressors
for _, row in habitats_info_df.iterrows():
habitat_name = row['NAME']
total_risk_raster_path = row['TOT_RISK_RASTER_PATH']
target_pickle_stats_path = row[
'TOT_RISK_PICKLE_STATS_PATH'].replace(
'.pickle', region_name + '.pickle')
LOGGER.info('Calculating overall zonal stats of %s in %s.' %
(habitat_name, region_name))
zonal_stats_dependent_tasks.append(task_graph.add_task(
func=_calc_and_pickle_zonal_stats,
args=(total_risk_raster_path, zonal_raster_path,
target_pickle_stats_path, file_preprocessing_dir),
kwargs={'max_rating': max_rating},
target_path_list=[target_pickle_stats_path],
task_name='calc_%s_risk_stats_in_%s' % (
habitat_name, region_name)))
# Compute pairwise E/C zonal stats
for criteria_type in ['E', 'C']:
total_criteria_raster_path = row[
'TOT_' + criteria_type + '_RASTER_PATH']
target_pickle_stats_path = row[
'TOT_' + criteria_type + '_PICKLE_STATS_PATH'].replace(
'.pickle', region_name + '.pickle')
zonal_stats_dependent_tasks.append(task_graph.add_task(
func=_calc_and_pickle_zonal_stats,
args=(total_criteria_raster_path, zonal_raster_path,
target_pickle_stats_path, file_preprocessing_dir),
target_path_list=[target_pickle_stats_path],
task_name='calc_%s_%s_stats_in_%s' % (
habitat_name, criteria_type, region_name)))
# Convert the statistics dataframe to a CSV file
target_stats_csv_path = os.path.join(
output_dir, 'SUMMARY_STATISTICS%s.csv' % file_suffix)
task_graph.add_task(
func=_zonal_stats_to_csv,
args=(
overlap_df, habitats_info_df, region_list, target_stats_csv_path),
target_path_list=[target_stats_csv_path],
task_name='zonal_stats_to_csv',
dependent_task_list=zonal_stats_dependent_tasks)
# Finish the model if no visualization outputs need to be generated
if not args['visualize_outputs']:
task_graph.close()
task_graph.join()
LOGGER.info('HRA model completed.')
return
else:
LOGGER.info('Generating visualization outputs.')
    # Unproject output rasters to geographic WGS84 (EPSG:4326), and then convert
# the rasters to GeoJSON files for visualization
LOGGER.info('Unprojecting output rasters')
out_risk_raster_paths = info_df[
info_df.TYPE == _HABITAT_TYPE].RECLASS_RISK_RASTER_PATH.tolist()
out_stressor_raster_paths = info_df[
info_df.TYPE == _STRESSOR_TYPE].ALIGN_RASTER_PATH.tolist()
out_raster_paths = out_risk_raster_paths + out_stressor_raster_paths + [
reclass_ecosystem_risk_raster_path]
# Convert the rasters to GeoJSON files in WGS84 for web visualization,
    # since only this format is recognized by Leaflet
wgs84_sr = osr.SpatialReference()
wgs84_sr.ImportFromEPSG(_WGS84_ESPG_CODE)
wgs84_wkt = wgs84_sr.ExportToWkt()
for out_raster_path in out_raster_paths:
# Get raster basename without file extension and remove prefix
prefix = 'aligned_'
file_basename = os.path.splitext(os.path.basename(out_raster_path))[0]
if file_basename.startswith(prefix):
file_basename = file_basename[len(prefix):]
# Make a GeoJSON from the unprojected raster with an appropriate field
# name
if file_basename.startswith('RECLASS_RISK_'):
field_name = 'Risk Score'
else:
# Append 'STRESSOR_' prefix if it's not a risk layer
file_basename = 'STRESSOR_' + file_basename
field_name = 'Stressor'
geojson_path = os.path.join(viz_dir, file_basename + '.geojson')
task_graph.add_task(
func=_raster_to_geojson,
args=(out_raster_path, geojson_path, file_basename, field_name),
kwargs={'target_sr_wkt': wgs84_wkt},
target_path_list=[geojson_path],
task_name='create_%s_geojson' % file_basename)
task_graph.close()
task_graph.join()
# Copy summary stats CSV to the viz output folder for scatter plots
# visualization. This keeps all the viz files in one place
viz_stats_csv_path = os.path.join(
viz_dir, 'SUMMARY_STATISTICS%s.csv' % file_suffix)
shutil.copyfile(target_stats_csv_path, viz_stats_csv_path)
LOGGER.info(
'HRA model completed. Please visit http://marineapps.'
'naturalcapitalproject.org/ to visualize your outputs.')
def _create_raster_and_rasterize_vector(
simplified_vector_path, target_raster_path, target_pixel_size,
rasterize_pixel_type, rasterize_nodata, fill_value, rasterize_kwargs):
"""Wrap these related operations so they can be captured in one task."""
pygeoprocessing.create_raster_from_vector_extents(
simplified_vector_path, target_raster_path,
target_pixel_size, rasterize_pixel_type, rasterize_nodata,
fill_value=fill_value)
pygeoprocessing.rasterize(
simplified_vector_path, target_raster_path,
burn_values=rasterize_kwargs['burn_values'],
option_list=rasterize_kwargs['option_list'])
def _raster_to_geojson(
base_raster_path, target_geojson_path, layer_name, field_name,
target_sr_wkt=None):
"""Convert a raster to a GeoJSON file with layer and field name.
Typically the base raster will be projected and the target GeoJSON should
end up with geographic coordinates (EPSG: 4326 per the GeoJSON spec).
So a GPKG serves as intermediate storage for the polygonized projected
features.
Args:
base_raster_path (str): the raster that needs to be turned into a
GeoJSON file.
target_geojson_path (str): the desired path for the new GeoJSON.
layer_name (str): the name of the layer going into the new shapefile.
field_name (str): the name of the field to write raster values in.
target_sr_wkt (str): the target projection for vector in Well Known
Text (WKT) form.
Returns:
None.
"""
raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)
band = raster.GetRasterBand(1)
mask = band.GetMaskBand()
# Use raster SRS for the temp GPKG
base_sr = osr.SpatialReference()
base_sr_wkt = raster.GetProjectionRef()
base_sr.ImportFromWkt(base_sr_wkt)
# Polygonize onto a GPKG
gpkg_driver = gdal.GetDriverByName('GPKG')
temp_gpkg_path = os.path.splitext(target_geojson_path)[0] + '.gpkg'
vector = gpkg_driver.Create(temp_gpkg_path, 0, 0, 0, gdal.GDT_Unknown)
vector.StartTransaction()
vector_layer = vector.CreateLayer(layer_name, base_sr, ogr.wkbPolygon)
# Create an integer field that contains values from the raster
field_defn = ogr.FieldDefn(str(field_name), ogr.OFTInteger)
field_defn.SetWidth(3)
field_defn.SetPrecision(0)
vector_layer.CreateField(field_defn)
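    # gdal.Polygonize writes one polygon per connected region of equal pixel
    # value, storing that value in field index 0 (the field created above);
    # the mask band excludes nodata pixels from polygonization.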
gdal.Polygonize(band, mask, vector_layer, 0)
vector_layer.SyncToDisk()
vector.CommitTransaction()
vector_layer = None
vector = None
band = None
raster = None
# Convert GPKG to GeoJSON, reprojecting if necessary
if target_sr_wkt and base_sr_wkt != target_sr_wkt:
pygeoprocessing.reproject_vector(
temp_gpkg_path, target_sr_wkt, target_geojson_path,
driver_name='GeoJSON')
else:
geojson_driver = gdal.GetDriverByName('GeoJSON')
geojson_driver.CreateCopy(target_geojson_path, temp_gpkg_path)
os.remove(temp_gpkg_path)
def _calc_and_pickle_zonal_stats(
score_raster_path, zonal_raster_path, target_pickle_stats_path,
working_dir, max_rating=None):
"""Calculate zonal stats on a score raster where zonal raster is 1.
Clip the score raster with the bounding box of zonal raster first, so
we only look at blocks that intersect.
Args:
score_raster_path (str): a path to the E/C/risk score raster to be
analyzed.
zonal_raster_path (str): a path to the zonal raster with 1s
representing the regional extent, used for getting statistics
from score raster in that region.
target_pickle_stats_path (str): a path to the pickle file for storing
zonal statistics, including count, sum, min, max, and mean.
working_dir (str): a path to the working folder for saving clipped
score raster file.
max_rating (float): if exists, it's used for classifying risks into
three categories and calculating percentage area of high/medium/low
scores.
Returns:
None
"""
# Create a stats dictionary for saving zonal statistics, including
# mean, min, and max.
stats_dict = {}
stats_dict['MIN'] = float('inf')
stats_dict['MAX'] = float('-inf')
for stats_type in ['MEAN', '%HIGH', '%MEDIUM', '%LOW']:
stats_dict[stats_type] = 0.
# Clip score raster to the extent of zonal raster. The file will be deleted
# at the end.
with tempfile.NamedTemporaryFile(
prefix='clipped_', suffix='.tif', delete=False,
dir=working_dir) as clipped_raster_file:
clipped_score_raster_path = clipped_raster_file.name
zonal_raster_info = pygeoprocessing.get_raster_info(zonal_raster_path)
target_pixel_size = zonal_raster_info['pixel_size']
target_bounding_box = zonal_raster_info['bounding_box']
target_sr_wkt = zonal_raster_info['projection_wkt']
pygeoprocessing.warp_raster(
score_raster_path, target_pixel_size, clipped_score_raster_path,
_RESAMPLE_METHOD, target_bb=target_bounding_box,
target_projection_wkt=target_sr_wkt)
# Return a dictionary with values of 0, if the two input rasters do not
# intersect at all.
score_raster = gdal.OpenEx(clipped_score_raster_path, gdal.OF_RASTER)
try:
score_band = score_raster.GetRasterBand(1)
except ValueError as e:
if 'Bounding boxes do not intersect' in repr(e):
LOGGER.info('Bounding boxes of %s and %s do not intersect.' %
(score_raster_path, zonal_raster_path))
for stats_type in stats_dict:
stats_dict[stats_type] = None # This will leave blank in CSV table
score_raster = None
pickle.dump(stats_dict, open(target_pickle_stats_path, 'wb'))
os.remove(clipped_score_raster_path)
return
score_nodata = score_band.GetNoDataValue()
zonal_raster = gdal.OpenEx(zonal_raster_path, gdal.OF_RASTER)
zonal_band = zonal_raster.GetRasterBand(1)
pixel_count = 0.
pixel_sum = 0.
if max_rating:
high_score_count = 0.
med_score_count = 0.
low_score_count = 0.
# Iterate through each data block and calculate stats.
for score_offsets in pygeoprocessing.iterblocks(
(clipped_score_raster_path, 1), offset_only=True):
score_block = score_band.ReadAsArray(**score_offsets)
zonal_block = zonal_band.ReadAsArray(**score_offsets)
valid_mask = (score_block != score_nodata) & (zonal_block == 1)
valid_score_block = score_block[valid_mask]
if valid_score_block.size == 0:
continue
# Calculate min and max values, and sum and count of valid pixels.
pixel_count += valid_score_block.size
pixel_sum += numpy.sum(valid_score_block)
stats_dict['MIN'] = min(
stats_dict['MIN'], numpy.amin(valid_score_block))
stats_dict['MAX'] = max(
stats_dict['MAX'], numpy.amax(valid_score_block))
# Calculate percentage of high, medium, and low rating areas.
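        # Illustrative thresholds: with max_rating = 3, scores above 2 count
        # as high, scores in (1, 2] as medium, and scores <= 1 as low.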
if max_rating:
high_score_count += valid_score_block[
(valid_score_block > max_rating/3*2)].size
med_score_count += valid_score_block[
(valid_score_block <= max_rating/3*2) &
(valid_score_block > max_rating/3)].size
low_score_count += valid_score_block[
(valid_score_block <= max_rating/3)].size
if pixel_count > 0:
stats_dict['MEAN'] = pixel_sum / pixel_count
if max_rating:
stats_dict['%HIGH'] = high_score_count/pixel_count*100.
stats_dict['%MEDIUM'] = med_score_count/pixel_count*100.
stats_dict['%LOW'] = low_score_count/pixel_count*100.
else:
for stats_type in stats_dict:
stats_dict[stats_type] = None # This will leave blank in CSV table
score_raster = None
zonal_raster = None
zonal_band = None
score_band = None
pickle.dump(stats_dict, open(target_pickle_stats_path, 'wb'))
os.remove(clipped_score_raster_path)
def _zonal_stats_to_csv(
overlap_df, info_df, region_list, target_stats_csv_path):
"""Unpickle zonal stats from files and concatenate the dataframe into CSV.
Args:
overlap_df (dataframe): a multi-index dataframe with exposure and
consequence raster paths, as well as pickle path columns for
getting zonal statistics dictionary from.
        info_df (dataframe): a dataframe with information on total
exposure, consequence, and risk raster/pickle file paths for each
habitat.
region_list (list): a list of subregion names used as column values of
the ``SUBREGION`` column in the zonal stats dataframe.
target_stats_csv_path (str): path to the CSV file for saving the final
merged zonal stats dataframe.
Returns:
None
"""
# Create a stats dataframe with habitat and stressor index from overlap
# dataframe
crit_stats_cols = ['MEAN', 'MIN', 'MAX']
risk_stats_cols = crit_stats_cols + ['%HIGH', '%MEDIUM', '%LOW']
len_crit_cols = len(crit_stats_cols)
len_risk_cols = len(risk_stats_cols)
columns = map(
str.__add__,
['E_']*len_crit_cols + ['C_']*len_crit_cols + ['R_']*len_risk_cols,
crit_stats_cols*2 + risk_stats_cols)
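    # The resulting column names are, in order: E_MEAN, E_MIN, E_MAX, C_MEAN,
    # C_MIN, C_MAX, R_MEAN, R_MIN, R_MAX, R_%HIGH, R_%MEDIUM, R_%LOW.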
stats_df = pandas.DataFrame(index=overlap_df.index, columns=columns)
# Add a ``SUBREGION`` column to the dataframe and update it with the
# corresponding stats in each subregion
region_df_list = []
for region in region_list:
region_df = stats_df.copy()
# Insert the new column in the beginning
region_df.insert(loc=0, column='SUBREGION', value=region)
for hab_str_idx, row in overlap_df.iterrows():
# Unpack pairwise criteria stats
for criteria_type in ['E', 'C']:
crit_stats_dict = pickle.load(
open(row[criteria_type + '_PICKLE_STATS_PATH'].replace(
'.pickle', region + '.pickle'), 'rb'))
for stats_type in crit_stats_cols:
header = criteria_type + '_' + stats_type
region_df.loc[hab_str_idx, header] = crit_stats_dict[
stats_type]
# Unpack pairwise risk stats
risk_stats_dict = pickle.load(
open(row['PAIR_RISK_PICKLE_STATS_PATH'].replace(
'.pickle', region + '.pickle'), 'rb'))
for stats_type in risk_stats_cols:
header = 'R_' + stats_type
region_df.loc[hab_str_idx, header] = risk_stats_dict[
stats_type]
for _, row in info_df.iterrows():
habitat_name = row['NAME']
# An index used as values for HABITAT and STRESSOR columns
hab_only_idx = (habitat_name, '(FROM ALL STRESSORS)')
region_df.loc[hab_only_idx, 'SUBREGION'] = region
# Unpack total criteria stats
for criteria_type in ['E', 'C']:
crit_stats_dict = pickle.load(
open(row[
'TOT_' + criteria_type + '_PICKLE_STATS_PATH'].replace(
'.pickle', region + '.pickle'), 'rb'))
for stats_type in crit_stats_cols:
header = criteria_type + '_' + stats_type
region_df.loc[hab_only_idx, header] = crit_stats_dict[
stats_type]
# Unpack total risk stats
risk_stats_dict = pickle.load(
open(row['TOT_RISK_PICKLE_STATS_PATH'].replace(
'.pickle', region + '.pickle'), 'rb'))
for stats_type in risk_stats_cols:
header = 'R_' + stats_type
region_df.loc[hab_only_idx, header] = risk_stats_dict[
stats_type]
region_df_list.append(region_df)
# Merge all the subregion dataframes
    final_stats_df = pandas.concat(region_df_list)
# Module: Regression
# Author: <NAME> <<EMAIL>>
# License: MIT
# Release: PyCaret 2.1
# Last modified : 17/08/2020
def setup(data,
target,
train_size = 0.7,
sampling = True,
sample_estimator = None,
categorical_features = None,
categorical_imputation = 'constant',
ordinal_features = None,
high_cardinality_features = None,
high_cardinality_method = 'frequency',
numeric_features = None,
numeric_imputation = 'mean',
date_features = None,
ignore_features = None,
normalize = False,
normalize_method = 'zscore',
transformation = False,
transformation_method = 'yeo-johnson',
handle_unknown_categorical = True,
unknown_categorical_method = 'least_frequent',
pca = False,
pca_method = 'linear',
pca_components = None,
ignore_low_variance = False,
combine_rare_levels = False,
rare_level_threshold = 0.10,
bin_numeric_features = None,
remove_outliers = False,
outliers_threshold = 0.05,
remove_multicollinearity = False,
multicollinearity_threshold = 0.9,
remove_perfect_collinearity = False, #added in pycaret==2.0.0
create_clusters = False,
cluster_iter = 20,
polynomial_features = False,
polynomial_degree = 2,
trigonometry_features = False,
polynomial_threshold = 0.1,
group_features = None,
group_names = None,
feature_selection = False,
feature_selection_threshold = 0.8,
feature_selection_method = 'classic',
feature_interaction = False,
feature_ratio = False,
interaction_threshold = 0.01,
transform_target = False,
transform_target_method = 'box-cox',
data_split_shuffle = True, #added in pycaret==2.0.0
folds_shuffle = False, #added in pycaret==2.0.0
n_jobs = -1, #added in pycaret==2.0.0
use_gpu = False, #added in pycaret==2.1
html = True, #added in pycaret==2.0.0
session_id = None,
log_experiment = False, #added in pycaret==2.0.0
experiment_name = None, #added in pycaret==2.0.0
log_plots = False, #added in pycaret==2.0.0
log_profile = False, #added in pycaret==2.0.0
log_data = False, #added in pycaret==2.0.0
silent = False,
verbose = True, #added in pycaret==2.0.0
profile = False):
"""
This function initializes the environment in pycaret and creates the transformation
    pipeline to prepare the data for modeling and deployment. setup() must be called before
executing any other function in pycaret. It takes two mandatory parameters:
dataframe {array-like, sparse matrix} and name of the target column.
All other parameters are optional.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> experiment_name = setup(data = boston, target = 'medv')
'boston' is a pandas.DataFrame and 'medv' is the name of target column.
Parameters
----------
data : pandas.DataFrame
Shape (n_samples, n_features) where n_samples is the number of samples and n_features is the number of features.
target: string
Name of target column to be passed in as string.
train_size: float, default = 0.7
Size of the training set. By default, 70% of the data will be used for training
and validation. The remaining data will be used for test / hold-out set.
sampling: bool, default = True
When the sample size exceeds 25,000 samples, pycaret will build a base estimator
at various sample sizes from the original dataset. This will return a performance
plot of R2 values at various sample levels, that will assist in deciding the
preferred sample size for modeling. The desired sample size must then be entered
for training and validation in the pycaret environment. When sample_size entered
is less than 1, the remaining dataset (1 - sample) is used for fitting the model
only when finalize_model() is called.
sample_estimator: object, default = None
If None, Linear Regression is used by default.
categorical_features: string, default = None
If the inferred data types are not correct, categorical_features can be used to
overwrite the inferred type. If when running setup the type of 'column1' is
inferred as numeric instead of categorical, then this parameter can be used
to overwrite the type by passing categorical_features = ['column1'].
categorical_imputation: string, default = 'constant'
If missing values are found in categorical features, they will be imputed with
a constant 'not_available' value. The other available option is 'mode' which
imputes the missing value using most frequent value in the training dataset.
ordinal_features: dictionary, default = None
When the data contains ordinal features, they must be encoded differently using
the ordinal_features param. If the data has a categorical variable with values
of 'low', 'medium', 'high' and it is known that low < medium < high, then it can
be passed as ordinal_features = { 'column_name' : ['low', 'medium', 'high'] }.
The list sequence must be in increasing order from lowest to highest.
high_cardinality_features: string, default = None
    When the data contains features with high cardinality, they can be compressed
into fewer levels by passing them as a list of column names with high cardinality.
Features are compressed using method defined in high_cardinality_method param.
high_cardinality_method: string, default = 'frequency'
When method set to 'frequency' it will replace the original value of feature
with the frequency distribution and convert the feature into numeric. Other
available method is 'clustering' which performs the clustering on statistical
attribute of data and replaces the original value of feature with cluster label.
The number of clusters is determined using a combination of Calinski-Harabasz and
Silhouette criterion.
numeric_features: string, default = None
If the inferred data types are not correct, numeric_features can be used to
overwrite the inferred type. If when running setup the type of 'column1' is
inferred as a categorical instead of numeric, then this parameter can be used
to overwrite by passing numeric_features = ['column1'].
numeric_imputation: string, default = 'mean'
If missing values are found in numeric features, they will be imputed with the
mean value of the feature. The other available option is 'median' which imputes
the value using the median value in the training dataset.
date_features: string, default = None
If the data has a DateTime column that is not automatically detected when running
setup, this parameter can be used by passing date_features = 'date_column_name'.
It can work with multiple date columns. Date columns are not used in modeling.
Instead, feature extraction is performed and date columns are dropped from the
dataset. If the date column includes a time stamp, features related to time will
also be extracted.
ignore_features: string, default = None
If any feature should be ignored for modeling, it can be passed to the param
ignore_features. The ID and DateTime columns when inferred, are automatically
set to ignore for modeling.
normalize: bool, default = False
When set to True, the feature space is transformed using the normalized_method
param. Generally, linear algorithms perform better with normalized data however,
the results may vary and it is advised to run multiple experiments to evaluate
the benefit of normalization.
normalize_method: string, default = 'zscore'
Defines the method to be used for normalization. By default, normalize method
is set to 'zscore'. The standard zscore is calculated as z = (x - u) / s. The
other available options are:
'minmax' : scales and translates each feature individually such that it is in
the range of 0 - 1.
'maxabs' : scales and translates each feature individually such that the maximal
absolute value of each feature will be 1.0. It does not shift/center
the data, and thus does not destroy any sparsity.
'robust' : scales and translates each feature according to the Interquartile range.
When the dataset contains outliers, robust scaler often gives better
results.
transformation: bool, default = False
When set to True, a power transformation is applied to make the data more normal /
Gaussian-like. This is useful for modeling issues related to heteroscedasticity or
other situations where normality is desired. The optimal parameter for stabilizing
variance and minimizing skewness is estimated through maximum likelihood.
transformation_method: string, default = 'yeo-johnson'
Defines the method for transformation. By default, the transformation method is set
to 'yeo-johnson'. The other available option is 'quantile' transformation. Both
    transformations transform the feature set to follow a Gaussian-like or normal
distribution. Note that the quantile transformer is non-linear and may distort linear
correlations between variables measured at the same scale.
handle_unknown_categorical: bool, default = True
When set to True, unknown categorical levels in new / unseen data are replaced by
the most or least frequent level as learned in the training data. The method is
defined under the unknown_categorical_method param.
unknown_categorical_method: string, default = 'least_frequent'
Method used to replace unknown categorical levels in unseen data. Method can be
set to 'least_frequent' or 'most_frequent'.
pca: bool, default = False
When set to True, dimensionality reduction is applied to project the data into
a lower dimensional space using the method defined in pca_method param. In
supervised learning pca is generally performed when dealing with high feature
space and memory is a constraint. Note that not all datasets can be decomposed
efficiently using a linear PCA technique and that applying PCA may result in loss
of information. As such, it is advised to run multiple experiments with different
pca_methods to evaluate the impact.
pca_method: string, default = 'linear'
The 'linear' method performs Linear dimensionality reduction using Singular Value
Decomposition. The other available options are:
    kernel : dimensionality reduction through the use of an RBF kernel.
incremental : replacement for 'linear' pca when the dataset to be decomposed is
too large to fit in memory
pca_components: int/float, default = 0.99
Number of components to keep. if pca_components is a float, it is treated as a
target percentage for information retention. When pca_components is an integer
it is treated as the number of features to be kept. pca_components must be strictly
less than the original number of features in the dataset.
ignore_low_variance: bool, default = False
When set to True, all categorical features with statistically insignificant variances
are removed from the dataset. The variance is calculated using the ratio of unique
values to the number of samples, and the ratio of the most common value to the
frequency of the second most common value.
combine_rare_levels: bool, default = False
When set to True, all levels in categorical features below the threshold defined
in rare_level_threshold param are combined together as a single level. There must be
    at least two levels under the threshold for this to take effect. rare_level_threshold
represents the percentile distribution of level frequency. Generally, this technique
is applied to limit a sparse matrix caused by high numbers of levels in categorical
features.
rare_level_threshold: float, default = 0.1
Percentile distribution below which rare categories are combined. Only comes into
effect when combine_rare_levels is set to True.
bin_numeric_features: list, default = None
When a list of numeric features is passed they are transformed into categorical
features using KMeans, where values in each bin have the same nearest center of a
    1D k-means cluster. The number of clusters is determined based on the 'sturges'
    method. It is only optimal for Gaussian data and underestimates the number of bins
    for large non-Gaussian datasets.
remove_outliers: bool, default = False
    When set to True, outliers from the training data are removed using PCA-based linear
    dimensionality reduction with the Singular Value Decomposition technique.
outliers_threshold: float, default = 0.05
The percentage / proportion of outliers in the dataset can be defined using
the outliers_threshold param. By default, 0.05 is used which means 0.025 of the
values on each side of the distribution's tail are dropped from training data.
remove_multicollinearity: bool, default = False
When set to True, the variables with inter-correlations higher than the threshold
defined under the multicollinearity_threshold param are dropped. When two features
are highly correlated with each other, the feature that is less correlated with
the target variable is dropped.
multicollinearity_threshold: float, default = 0.9
Threshold used for dropping the correlated features. Only comes into effect when
remove_multicollinearity is set to True.
remove_perfect_collinearity: bool, default = False
When set to True, perfect collinearity (features with correlation = 1) is removed
    from the dataset. When two features are 100% correlated, one of them is randomly
dropped from the dataset.
create_clusters: bool, default = False
When set to True, an additional feature is created where each instance is assigned
to a cluster. The number of clusters is determined using a combination of
Calinski-Harabasz and Silhouette criterion.
cluster_iter: int, default = 20
Number of iterations used to create a cluster. Each iteration represents cluster
size. Only comes into effect when create_clusters param is set to True.
polynomial_features: bool, default = False
When set to True, new features are created based on all polynomial combinations
that exist within the numeric features in a dataset to the degree defined in
polynomial_degree param.
polynomial_degree: int, default = 2
Degree of polynomial features. For example, if an input sample is two dimensional
and of the form [a, b], the polynomial features with degree = 2 are:
[1, a, b, a^2, ab, b^2].
trigonometry_features: bool, default = False
When set to True, new features are created based on all trigonometric combinations
that exist within the numeric features in a dataset to the degree defined in the
polynomial_degree param.
polynomial_threshold: float, default = 0.1
This is used to compress a sparse matrix of polynomial and trigonometric features.
Polynomial and trigonometric features whose feature importance based on the
combination of Random Forest, AdaBoost and Linear correlation falls within the
percentile of the defined threshold are kept in the dataset. Remaining features
are dropped before further processing.
group_features: list or list of list, default = None
When a dataset contains features that have related characteristics, the group_features
param can be used for statistical feature extraction. For example, if a dataset has
    numeric features that are related to each other (i.e. 'Col1', 'Col2', 'Col3'), a list
containing the column names can be passed under group_features to extract statistical
information such as the mean, median, mode and standard deviation.
group_names: list, default = None
When group_features is passed, a name of the group can be passed into the group_names
param as a list containing strings. The length of a group_names list must equal to the
length of group_features. When the length doesn't match or the name is not passed, new
features are sequentially named such as group_1, group_2 etc.
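    For example (illustrative only): passing group_features = ['Col1', 'Col2', 'Col3']
    with group_names = ['sensor_group'] would add aggregate features such as the mean,
    median, mode and standard deviation computed across those three columns.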
feature_selection: bool, default = False
When set to True, a subset of features are selected using a combination of various
permutation importance techniques including Random Forest, Adaboost and Linear
correlation with target variable. The size of the subset is dependent on the
    feature_selection_threshold param. Generally, this is used to constrain the feature space
in order to improve efficiency in modeling. When polynomial_features and
feature_interaction are used, it is highly recommended to define the
feature_selection_threshold param with a lower value. Feature selection algorithm
    by default is 'classic' but could be 'boruta', which will lead PyCaret to use
    the Boruta selection algorithm.
feature_selection_threshold: float, default = 0.8
Threshold used for feature selection (including newly created polynomial features).
A higher value will result in a higher feature space. It is recommended to do multiple
    trials with different values of feature_selection_threshold, especially in cases where
polynomial_features and feature_interaction are used. Setting a very low value may be
efficient but could result in under-fitting.
feature_selection_method: str, default = 'classic'
Can be either 'classic' or 'boruta'. Selects the algorithm responsible for
choosing a subset of features. For the 'classic' selection method, PyCaret will use various
permutation importance techniques. For the 'boruta' algorithm, PyCaret will create
an instance of boosted trees model, which will iterate with permutation over all
features and choose the best ones based on the distributions of feature importance.
More in: https://pdfs.semanticscholar.org/85a8/b1d9c52f9f795fda7e12376e751526953f38.pdf
feature_interaction: bool, default = False
When set to True, it will create new features by interacting (a * b) for all numeric
variables in the dataset including polynomial and trigonometric features (if created).
This feature is not scalable and may not work as expected on datasets with large
feature space.
feature_ratio: bool, default = False
When set to True, it will create new features by calculating the ratios (a / b) of all
numeric variables in the dataset. This feature is not scalable and may not work as
expected on datasets with large feature space.
interaction_threshold: float, default = 0.01
Similar to polynomial_threshold, it is used to compress a sparse matrix of newly
created features through interaction. Features whose importance based on the
combination of Random Forest, AdaBoost and Linear correlation falls within the
percentile of the defined threshold are kept in the dataset. Remaining features
are dropped before further processing.
transform_target: bool, default = False
When set to True, target variable is transformed using the method defined in
transform_target_method param. Target transformation is applied separately from
feature transformations.
transform_target_method: string, default = 'box-cox'
'Box-cox' and 'yeo-johnson' methods are supported. Box-Cox requires input data to
be strictly positive, while Yeo-Johnson supports both positive and negative data.
When transform_target_method is 'box-cox' and target variable contains negative
values, method is internally forced to 'yeo-johnson' to avoid exceptions.
data_split_shuffle: bool, default = True
If set to False, prevents shuffling of rows when splitting data.
folds_shuffle: bool, default = True
If set to False, prevents shuffling of rows when using cross validation.
n_jobs: int, default = -1
The number of jobs to run in parallel (for functions that support parallel
processing); -1 means using all processors. To run all functions on a single processor,
set n_jobs to None.
use_gpu: bool, default = False
If set to True, algorithms that support GPU are trained using the GPU.
html: bool, default = True
If set to False, prevents runtime display of the monitor. This must be set to False
when using an environment that doesn't support HTML.
session_id: int, default = None
If None, a random seed is generated and returned in the Information grid. The
unique number is then distributed as a seed in all functions used during the
experiment. This can be used for later reproducibility of the entire experiment.
log_experiment: bool, default = False
When set to True, all metrics and parameters are logged on the MLflow server.
experiment_name: str, default = None
Name of the experiment for logging. When set to None, 'reg' is used by default as the
alias for the experiment name.
log_plots: bool, default = False
When set to True, specific plots are logged in MLflow as a png file. By default,
it is set to False.
log_profile: bool, default = False
When set to True, the data profile is also logged on MLflow as an HTML file. By default,
it is set to False.
log_data: bool, default = False
When set to True, the train and test datasets are logged as CSV files.
silent: bool, default = False
When set to True, confirmation of data types is not required. All preprocessing will
be performed assuming automatically inferred data types. Not recommended for direct use
except for established pipelines.
verbose: bool, default = True
Information grid is not printed when verbose is set to False.
profile: bool, default = False
If set to True, a data profile for Exploratory Data Analysis will be displayed
in an interactive HTML report.
Returns
-------
info_grid
Information grid is printed.
environment
This function returns various outputs that are stored in variable
as tuple. They are used by other functions in pycaret.
"""
#exception checking
import sys
from pycaret.utils import __version__
ver = __version__()
import logging
# create logger
global logger
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("PyCaret Regression Module")
logger.info('version ' + str(ver))
logger.info("Initializing setup()")
#generate USI for mlflow tracking
import secrets
global USI
USI = secrets.token_hex(nbytes=2)
logger.info('USI: ' + str(USI))
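# USI is a short random hex identifier for this setup session; it is later attached as the
# 'USI' tag on the MLflow run so logged artifacts can be traced back to this call.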
logger.info("""setup(data={}, target={}, train_size={}, sampling={}, sample_estimator={}, categorical_features={}, categorical_imputation={}, ordinal_features={},
high_cardinality_features={}, high_cardinality_method={}, numeric_features={}, numeric_imputation={}, date_features={}, ignore_features={}, normalize={},
normalize_method={}, transformation={}, transformation_method={}, handle_unknown_categorical={}, unknown_categorical_method={}, pca={}, pca_method={},
pca_components={}, ignore_low_variance={}, combine_rare_levels={}, rare_level_threshold={}, bin_numeric_features={}, remove_outliers={}, outliers_threshold={},
remove_multicollinearity={}, multicollinearity_threshold={}, remove_perfect_collinearity={}, create_clusters={}, cluster_iter={},
polynomial_features={}, polynomial_degree={}, trigonometry_features={}, polynomial_threshold={}, group_features={},
group_names={}, feature_selection={}, feature_selection_threshold={}, feature_interaction={}, feature_ratio={}, interaction_threshold={}, transform_target={},
transform_target_method={}, data_split_shuffle={}, folds_shuffle={}, n_jobs={}, html={}, session_id={}, log_experiment={},
experiment_name={}, log_plots={}, log_profile={}, log_data={}, silent={}, verbose={}, profile={})""".format(\
str(data.shape), str(target), str(train_size), str(sampling), str(sample_estimator), str(categorical_features), str(categorical_imputation), str(ordinal_features),\
str(high_cardinality_features), str(high_cardinality_method), str(numeric_features), str(numeric_imputation), str(date_features), str(ignore_features),\
str(normalize), str(normalize_method), str(transformation), str(transformation_method), str(handle_unknown_categorical), str(unknown_categorical_method), str(pca),\
str(pca_method), str(pca_components), str(ignore_low_variance), str(combine_rare_levels), str(rare_level_threshold), str(bin_numeric_features), str(remove_outliers),\
str(outliers_threshold), str(remove_multicollinearity), str(multicollinearity_threshold), str(remove_perfect_collinearity), str(create_clusters), str(cluster_iter),\
str(polynomial_features), str(polynomial_degree), str(trigonometry_features), str(polynomial_threshold), str(group_features), str(group_names),\
str(feature_selection), str(feature_selection_threshold), str(feature_interaction), str(feature_ratio), str(interaction_threshold), str(transform_target),\
str(transform_target_method), str(data_split_shuffle), str(folds_shuffle), str(n_jobs), str(html), str(session_id),\
str(log_experiment), str(experiment_name), str(log_plots), str(log_profile), str(log_data), str(silent), str(verbose), str(profile)))
#logging environment and libraries
logger.info("Checking environment")
from platform import python_version, platform, python_build, machine
try:
logger.info("python_version: " + str(python_version()))
except:
logger.warning("cannot find platform.python_version")
try:
logger.info("python_build: " + str(python_build()))
except:
logger.warning("cannot find platform.python_build")
try:
logger.info("machine: " + str(machine()))
except:
logger.warning("cannot find platform.machine")
try:
logger.info("platform: " + str(platform()))
except:
logger.warning("cannot find platform.platform")
try:
import psutil
logger.info("Memory: " + str(psutil.virtual_memory()))
logger.info("Physical Core: " + str(psutil.cpu_count(logical=False)))
logger.info("Logical Core: " + str(psutil.cpu_count(logical=True)))
except:
logger.warning("cannot find psutil installation. memory not traceable. Install psutil using pip to enable memory logging. ")
logger.info("Checking libraries")
try:
from pandas import __version__
logger.info("pd==" + str(__version__))
except:
logger.warning("pandas not found")
try:
from numpy import __version__
logger.info("numpy==" + str(__version__))
except:
logger.warning("numpy not found")
try:
from sklearn import __version__
logger.info("sklearn==" + str(__version__))
except:
logger.warning("sklearn not found")
try:
from xgboost import __version__
logger.info("xgboost==" + str(__version__))
except:
logger.warning("xgboost not found")
try:
from lightgbm import __version__
logger.info("lightgbm==" + str(__version__))
except:
logger.warning("lightgbm not found")
try:
from catboost import __version__
logger.info("catboost==" + str(__version__))
except:
logger.warning("catboost not found")
try:
from mlflow.version import VERSION
import warnings
warnings.filterwarnings('ignore')
logger.info("mlflow==" + str(VERSION))
except:
logger.warning("mlflow not found")
#run_time
import datetime, time
runtime_start = time.time()
logger.info("Checking Exceptions")
#checking data type
if hasattr(data,'shape') is False:
sys.exit('(Type Error): data passed must be of type pandas.DataFrame')
#checking train size parameter
if type(train_size) is not float:
sys.exit('(Type Error): train_size parameter only accepts float value.')
#checking sampling parameter
if type(sampling) is not bool:
sys.exit('(Type Error): sampling parameter only accepts True or False.')
#checking sampling parameter
if target not in data.columns:
sys.exit("(Value Error): Target parameter doesn't exist in the data provided.")
#checking session_id
if session_id is not None:
if type(session_id) is not int:
sys.exit('(Type Error): session_id parameter must be an integer.')
#checking sampling parameter
if type(profile) is not bool:
sys.exit('(Type Error): profile parameter only accepts True or False.')
#checking normalize parameter
if type(normalize) is not bool:
sys.exit('(Type Error): normalize parameter only accepts True or False.')
#checking transformation parameter
if type(transformation) is not bool:
sys.exit('(Type Error): transformation parameter only accepts True or False.')
#checking categorical imputation
allowed_categorical_imputation = ['constant', 'mode']
if categorical_imputation not in allowed_categorical_imputation:
sys.exit("(Value Error): categorical_imputation param only accepts 'constant' or 'mode' ")
#ordinal_features
if ordinal_features is not None:
if type(ordinal_features) is not dict:
sys.exit("(Type Error): ordinal_features must be of type dictionary with column name as key and ordered values as list. ")
#ordinal features check
if ordinal_features is not None:
data_cols = data.columns
data_cols = data_cols.drop(target)
ord_keys = ordinal_features.keys()
for i in ord_keys:
if i not in data_cols:
sys.exit("(Value Error) Column name passed as a key in ordinal_features param doesnt exist. ")
for k in ord_keys:
if data[k].nunique() != len(ordinal_features.get(k)):
sys.exit("(Value Error) Levels passed in ordinal_features param doesnt match with levels in data. ")
for i in ord_keys:
value_in_keys = ordinal_features.get(i)
value_in_data = list(data[i].unique().astype(str))
for j in value_in_keys:
if j not in value_in_data:
text = "Column name '" + str(i) + "' doesnt contain any level named '" + str(j) + "'."
sys.exit(text)
#high_cardinality_features
if high_cardinality_features is not None:
if type(high_cardinality_features) is not list:
sys.exit("(Type Error): high_cardinality_features param only accepts name of columns as a list. ")
if high_cardinality_features is not None:
data_cols = data.columns
data_cols = data_cols.drop(target)
for i in high_cardinality_features:
if i not in data_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#checking numeric imputation
allowed_numeric_imputation = ['mean', 'median']
if numeric_imputation not in allowed_numeric_imputation:
sys.exit("(Value Error): numeric_imputation param only accepts 'mean' or 'median' ")
#checking normalize method
allowed_normalize_method = ['zscore', 'minmax', 'maxabs', 'robust']
if normalize_method not in allowed_normalize_method:
sys.exit("(Value Error): normalize_method param only accepts 'zscore', 'minxmax', 'maxabs' or 'robust'. ")
#checking transformation method
allowed_transformation_method = ['yeo-johnson', 'quantile']
if transformation_method not in allowed_transformation_method:
sys.exit("(Value Error): transformation_method param only accepts 'yeo-johnson' or 'quantile' ")
#handle unknown categorical
if type(handle_unknown_categorical) is not bool:
sys.exit('(Type Error): handle_unknown_categorical parameter only accepts True or False.')
#unknown categorical method
unknown_categorical_method_available = ['least_frequent', 'most_frequent']
if unknown_categorical_method not in unknown_categorical_method_available:
sys.exit("(Type Error): unknown_categorical_method only accepts 'least_frequent' or 'most_frequent'.")
#check pca
if type(pca) is not bool:
sys.exit('(Type Error): PCA parameter only accepts True or False.')
#pca method check
allowed_pca_methods = ['linear', 'kernel', 'incremental',]
if pca_method not in allowed_pca_methods:
sys.exit("(Value Error): pca method param only accepts 'linear', 'kernel', or 'incremental'. ")
#pca components check
if pca is True:
if pca_method != 'linear':
if pca_components is not None:
if(type(pca_components)) is not int:
sys.exit("(Type Error): pca_components parameter must be integer when pca_method is not 'linear'. ")
#pca components check 2
if pca is True:
if pca_method != 'linear':
if pca_components is not None:
if pca_components > len(data.columns)-1:
sys.exit("(Type Error): pca_components parameter cannot be greater than original features space.")
#pca components check 3
if pca is True:
if pca_method == 'linear':
if pca_components is not None:
if type(pca_components) is not float:
if pca_components > len(data.columns)-1:
sys.exit("(Type Error): pca_components parameter cannot be greater than original features space or float between 0 - 1.")
#check ignore_low_variance
if type(ignore_low_variance) is not bool:
sys.exit('(Type Error): ignore_low_variance parameter only accepts True or False.')
#check combine_rare_levels
if type(combine_rare_levels) is not bool:
sys.exit('(Type Error): combine_rare_levels parameter only accepts True or False.')
#check rare_level_threshold
if type(rare_level_threshold) is not float:
sys.exit('(Type Error): rare_level_threshold must be a float between 0 and 1. ')
#bin numeric features
if bin_numeric_features is not None:
all_cols = list(data.columns)
all_cols.remove(target)
for i in bin_numeric_features:
if i not in all_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#check transform_target
if type(transform_target) is not bool:
sys.exit('(Type Error): transform_target parameter only accepts True or False.')
#transform_target_method
allowed_transform_target_method = ['box-cox', 'yeo-johnson']
if transform_target_method not in allowed_transform_target_method:
sys.exit("(Value Error): transform_target_method param only accepts 'box-cox' or 'yeo-johnson'. ")
#remove_outliers
if type(remove_outliers) is not bool:
sys.exit('(Type Error): remove_outliers parameter only accepts True or False.')
#outliers_threshold
if type(outliers_threshold) is not float:
sys.exit('(Type Error): outliers_threshold must be a float between 0 and 1. ')
#remove_multicollinearity
if type(remove_multicollinearity) is not bool:
sys.exit('(Type Error): remove_multicollinearity parameter only accepts True or False.')
#multicollinearity_threshold
if type(multicollinearity_threshold) is not float:
sys.exit('(Type Error): multicollinearity_threshold must be a float between 0 and 1. ')
#create_clusters
if type(create_clusters) is not bool:
sys.exit('(Type Error): create_clusters parameter only accepts True or False.')
#cluster_iter
if type(cluster_iter) is not int:
sys.exit('(Type Error): cluster_iter must be an integer greater than 1. ')
#polynomial_features
if type(polynomial_features) is not bool:
sys.exit('(Type Error): polynomial_features only accepts True or False. ')
#polynomial_degree
if type(polynomial_degree) is not int:
sys.exit('(Type Error): polynomial_degree must be an integer. ')
#trigonometry_features
if type(trigonometry_features) is not bool:
sys.exit('(Type Error): trigonometry_features only accepts True or False. ')
#polynomial threshold
if type(polynomial_threshold) is not float:
sys.exit('(Type Error): polynomial_threshold must be a float between 0 and 1. ')
#group features
if group_features is not None:
if type(group_features) is not list:
sys.exit('(Type Error): group_features must be of type list. ')
if group_names is not None:
if type(group_names) is not list:
sys.exit('(Type Error): group_names must be of type list. ')
#cannot drop target
if ignore_features is not None:
if target in ignore_features:
sys.exit("(Value Error): cannot drop target column. ")
#feature_selection
if type(feature_selection) is not bool:
sys.exit('(Type Error): feature_selection only accepts True or False. ')
#feature_selection_threshold
if type(feature_selection_threshold) is not float:
sys.exit('(Type Error): feature_selection_threshold must be a float between 0 and 1. ')
#feature_selection_method
if feature_selection_method not in ['boruta', 'classic']:
sys.exit("(Type Error): feature_selection_method must be string 'boruta', 'classic'")
#feature_interaction
if type(feature_interaction) is not bool:
sys.exit('(Type Error): feature_interaction only accepts True or False. ')
#feature_ratio
if type(feature_ratio) is not bool:
sys.exit('(Type Error): feature_ratio only accepts True or False. ')
#interaction_threshold
if type(interaction_threshold) is not float:
sys.exit('(Type Error): interaction_threshold must be a float between 0 and 1. ')
#cannot drop target
if ignore_features is not None:
if target in ignore_features:
sys.exit("(Value Error): cannot drop target column. ")
#forced type check
all_cols = list(data.columns)
all_cols.remove(target)
#categorical
if categorical_features is not None:
for i in categorical_features:
if i not in all_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#numeric
if numeric_features is not None:
for i in numeric_features:
if i not in all_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#date features
if date_features is not None:
for i in date_features:
if i not in all_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#drop features
if ignore_features is not None:
for i in ignore_features:
if i not in all_cols:
sys.exit("(Value Error): Feature ignored is either target column or doesn't exist in the dataset.")
#silent
if type(silent) is not bool:
sys.exit("(Type Error): silent parameter only accepts True or False. ")
#remove_perfect_collinearity
if type(remove_perfect_collinearity) is not bool:
sys.exit('(Type Error): remove_perfect_collinearity parameter only accepts True or False.')
#html
if type(html) is not bool:
sys.exit('(Type Error): html parameter only accepts True or False.')
#folds_shuffle
if type(folds_shuffle) is not bool:
sys.exit('(Type Error): folds_shuffle parameter only accepts True or False.')
#data_split_shuffle
if type(data_split_shuffle) is not bool:
sys.exit('(Type Error): data_split_shuffle parameter only accepts True or False.')
#log_experiment
if type(log_experiment) is not bool:
sys.exit('(Type Error): log_experiment parameter only accepts True or False.')
#log_plots
if type(log_plots) is not bool:
sys.exit('(Type Error): log_plots parameter only accepts True or False.')
#log_data
if type(log_data) is not bool:
sys.exit('(Type Error): log_data parameter only accepts True or False.')
#log_profile
if type(log_profile) is not bool:
sys.exit('(Type Error): log_profile parameter only accepts True or False.')
logger.info("Preloading libraries")
#pre-load libraries
import pandas as pd
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
import os
#pandas option
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
#global html_param
global html_param
#create html_param
html_param = html
#silent parameter to also set sampling to False
if silent:
sampling = False
logger.info("Preparing display monitor")
#progress bar
if sampling:
max = 10 + 3
else:
max = 3
progress = ipw.IntProgress(value=0, min=0, max=max, step=1 , description='Processing: ')
if verbose:
if html_param:
display(progress)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
['Status' , '. . . . . . . . . . . . . . . . . .' , 'Loading Dependencies' ],
['ETC' , '. . . . . . . . . . . . . . . . . .', 'Calculating ETC'] ],
columns=['', ' ', ' ']).set_index('')
if verbose:
if html_param:
display(monitor, display_id = 'monitor')
logger.info("Importing libraries")
#general dependencies
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
import random
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
#setting sklearn config to print all parameters including default
import sklearn
sklearn.set_config(print_changed_only=False)
#define highlight function for function grid to display
def highlight_max(s):
is_max = s == True
return ['background-color: yellow' if v else '' for v in is_max]
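# Used below via functions.style.apply(highlight_max): rows of the setup summary grid whose
# value is the boolean True are highlighted with a yellow background.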
#cufflinks
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
logger.info("Declaring global variables")
#declaring global variables to be accessed by other functions
global X, y, X_train, X_test, y_train, y_test, seed, prep_pipe, target_inverse_transformer, experiment__,\
preprocess, folds_shuffle_param, n_jobs_param, create_model_container, master_model_container,\
display_container, exp_name_log, logging_param, log_plots_param, data_before_preprocess, target_param,\
gpu_param
logger.info("Copying data for preprocessing")
#copy original data for pandas profiler
data_before_preprocess = data.copy()
#generate seed to be used globally
if session_id is None:
seed = random.randint(150,9000)
else:
seed = session_id
"""
preprocessing starts here
"""
monitor.iloc[1,1:] = 'Preparing Data for Modeling'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
#define parameters for preprocessor
logger.info("Declaring preprocessing parameters")
#categorical features
if categorical_features is None:
cat_features_pass = []
else:
cat_features_pass = categorical_features
#numeric features
if numeric_features is None:
numeric_features_pass = []
else:
numeric_features_pass = numeric_features
#drop features
if ignore_features is None:
ignore_features_pass = []
else:
ignore_features_pass = ignore_features
#date features
if date_features is None:
date_features_pass = []
else:
date_features_pass = date_features
#categorical imputation strategy
if categorical_imputation == 'constant':
categorical_imputation_pass = 'not_available'
elif categorical_imputation == 'mode':
categorical_imputation_pass = 'most frequent'
#transformation method strategy
if transformation_method == 'yeo-johnson':
trans_method_pass = 'yj'
elif transformation_method == 'quantile':
trans_method_pass = 'quantile'
#pass method
if pca_method == 'linear':
pca_method_pass = 'pca_liner'
elif pca_method == 'kernel':
pca_method_pass = 'pca_kernal'
elif pca_method == 'incremental':
pca_method_pass = 'incremental'
elif pca_method == 'pls':
pca_method_pass = 'pls'
#pca components
if pca is True:
if pca_components is None:
if pca_method == 'linear':
pca_components_pass = 0.99
else:
pca_components_pass = int((len(data.columns)-1)*0.5)
else:
pca_components_pass = pca_components
else:
pca_components_pass = 0.99
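# Defaulting illustrated: with pca=True and pca_components=None, the 'linear' method keeps
# 99% of the variance (0.99) while other methods default to half of the original feature
# count; when pca is False the 0.99 value is a placeholder that is effectively unused.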
if bin_numeric_features is None:
apply_binning_pass = False
features_to_bin_pass = []
else:
apply_binning_pass = True
features_to_bin_pass = bin_numeric_features
#trignometry
if trigonometry_features is False:
trigonometry_features_pass = []
else:
trigonometry_features_pass = ['sin', 'cos', 'tan']
#group features
#=============#
#apply grouping
if group_features is not None:
apply_grouping_pass = True
else:
apply_grouping_pass = False
#group features listing
if apply_grouping_pass is True:
if type(group_features[0]) is str:
group_features_pass = []
group_features_pass.append(group_features)
else:
group_features_pass = group_features
else:
group_features_pass = [[]]
#group names
if apply_grouping_pass is True:
if (group_names is None) or (len(group_names) != len(group_features_pass)):
group_names_pass = list(np.arange(len(group_features_pass)))
group_names_pass = ['group_' + str(i) for i in group_names_pass]
else:
group_names_pass = group_names
else:
group_names_pass = []
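# Grouping illustrated: a flat list such as ['Col1', 'Col2', 'Col3'] is wrapped into a list
# of lists, and when group_names is missing or its length does not match, sequential names
# ('group_0', 'group_1', ...) are generated for each group.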
#feature interactions
if feature_interaction or feature_ratio:
apply_feature_interactions_pass = True
else:
apply_feature_interactions_pass = False
interactions_to_apply_pass = []
if feature_interaction:
interactions_to_apply_pass.append('multiply')
if feature_ratio:
interactions_to_apply_pass.append('divide')
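# e.g. feature_interaction=True and feature_ratio=True yields ['multiply', 'divide'],
# requesting both a*b and a/b interaction features from the preprocessing pipeline.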
#unknown categorical
if unknown_categorical_method == 'least_frequent':
unknown_categorical_method_pass = 'least frequent'
elif unknown_categorical_method == 'most_frequent':
unknown_categorical_method_pass = 'most frequent'
#ordinal_features
if ordinal_features is not None:
apply_ordinal_encoding_pass = True
else:
apply_ordinal_encoding_pass = False
if apply_ordinal_encoding_pass is True:
ordinal_columns_and_categories_pass = ordinal_features
else:
ordinal_columns_and_categories_pass = {}
if high_cardinality_features is not None:
apply_cardinality_reduction_pass = True
else:
apply_cardinality_reduction_pass = False
if high_cardinality_method == 'frequency':
cardinal_method_pass = 'count'
elif high_cardinality_method == 'clustering':
cardinal_method_pass = 'cluster'
if apply_cardinality_reduction_pass:
cardinal_features_pass = high_cardinality_features
else:
cardinal_features_pass = []
if silent:
display_dtypes_pass = False
else:
display_dtypes_pass = True
#transform target method
if transform_target_method == 'box-cox':
transform_target_method_pass = 'bc'
elif transform_target_method == 'yeo-johnson':
transform_target_method_pass = 'yj'
logger.info("Importing preprocessing module")
#import library
import pycaret.preprocess as preprocess
logger.info("Creating preprocessing pipeline")
data = preprocess.Preprocess_Path_One(train_data = data,
target_variable = target,
categorical_features = cat_features_pass,
apply_ordinal_encoding = apply_ordinal_encoding_pass,
ordinal_columns_and_categories = ordinal_columns_and_categories_pass,
apply_cardinality_reduction = apply_cardinality_reduction_pass,
cardinal_method = cardinal_method_pass,
cardinal_features = cardinal_features_pass,
numerical_features = numeric_features_pass,
time_features = date_features_pass,
features_todrop = ignore_features_pass,
numeric_imputation_strategy = numeric_imputation,
categorical_imputation_strategy = categorical_imputation_pass,
scale_data = normalize,
scaling_method = normalize_method,
Power_transform_data = transformation,
Power_transform_method = trans_method_pass,
apply_untrained_levels_treatment= handle_unknown_categorical,
untrained_levels_treatment_method = unknown_categorical_method_pass,
apply_pca = pca,
pca_method = pca_method_pass,
pca_variance_retained_or_number_of_components = pca_components_pass,
apply_zero_nearZero_variance = ignore_low_variance,
club_rare_levels = combine_rare_levels,
rara_level_threshold_percentage = rare_level_threshold,
apply_binning = apply_binning_pass,
features_to_binn = features_to_bin_pass,
remove_outliers = remove_outliers,
outlier_contamination_percentage = outliers_threshold,
outlier_methods = ['pca'], #pca hardcoded
remove_multicollinearity = remove_multicollinearity,
maximum_correlation_between_features = multicollinearity_threshold,
remove_perfect_collinearity = remove_perfect_collinearity,
cluster_entire_data = create_clusters,
range_of_clusters_to_try = cluster_iter,
apply_polynomial_trigonometry_features = polynomial_features,
max_polynomial = polynomial_degree,
trigonometry_calculations = trigonometry_features_pass,
top_poly_trig_features_to_select_percentage = polynomial_threshold,
apply_grouping = apply_grouping_pass,
features_to_group_ListofList = group_features_pass,
group_name = group_names_pass,
apply_feature_selection = feature_selection,
feature_selection_top_features_percentage = feature_selection_threshold,
feature_selection_method = feature_selection_method,
apply_feature_interactions = apply_feature_interactions_pass,
feature_interactions_to_apply = interactions_to_apply_pass,
feature_interactions_top_features_to_select_percentage=interaction_threshold,
display_types = display_dtypes_pass,
target_transformation = transform_target,
target_transformation_method = transform_target_method_pass,
random_state = seed)
progress.value += 1
logger.info("Preprocessing pipeline created successfully")
if hasattr(preprocess.dtypes, 'replacement'):
label_encoded = preprocess.dtypes.replacement
label_encoded = str(label_encoded).replace("'", '')
label_encoded = str(label_encoded).replace("{", '')
label_encoded = str(label_encoded).replace("}", '')
else:
label_encoded = 'None'
try:
res_type = ['quit','Quit','exit','EXIT','q','Q','e','E','QUIT','Exit']
res = preprocess.dtypes.response
if res in res_type:
sys.exit("(Process Exit): setup has been interupted with user command 'quit'. setup must rerun." )
except:
pass
#save prep pipe
prep_pipe = preprocess.pipe
#save target inverse transformer
try:
target_inverse_transformer = preprocess.pt_target.p_transform_target
except:
target_inverse_transformer = None
logger.info("No inverse transformer found")
logger.info("Creating grid variables")
#generate values for grid show
missing_values = data_before_preprocess.isna().sum().sum()
if missing_values > 0:
missing_flag = True
else:
missing_flag = False
if normalize is True:
normalize_grid = normalize_method
else:
normalize_grid = 'None'
if transformation is True:
transformation_grid = transformation_method
else:
transformation_grid = 'None'
if pca is True:
pca_method_grid = pca_method
else:
pca_method_grid = 'None'
if pca is True:
pca_components_grid = pca_components_pass
else:
pca_components_grid = 'None'
if combine_rare_levels:
rare_level_threshold_grid = rare_level_threshold
else:
rare_level_threshold_grid = 'None'
if bin_numeric_features is None:
numeric_bin_grid = False
else:
numeric_bin_grid = True
if remove_outliers is False:
outliers_threshold_grid = None
else:
outliers_threshold_grid = outliers_threshold
if remove_multicollinearity is False:
multicollinearity_threshold_grid = None
else:
multicollinearity_threshold_grid = multicollinearity_threshold
if create_clusters is False:
cluster_iter_grid = None
else:
cluster_iter_grid = cluster_iter
if polynomial_features:
polynomial_degree_grid = polynomial_degree
else:
polynomial_degree_grid = None
if polynomial_features or trigonometry_features:
polynomial_threshold_grid = polynomial_threshold
else:
polynomial_threshold_grid = None
if feature_selection:
feature_selection_threshold_grid = feature_selection_threshold
else:
feature_selection_threshold_grid = None
if feature_interaction or feature_ratio:
interaction_threshold_grid = interaction_threshold
else:
interaction_threshold_grid = None
if ordinal_features is not None:
ordinal_features_grid = True
else:
ordinal_features_grid = False
if handle_unknown_categorical:
unknown_categorical_method_grid = unknown_categorical_method
else:
unknown_categorical_method_grid = None
if group_features is not None:
group_features_grid = True
else:
group_features_grid = False
if high_cardinality_features is not None:
high_cardinality_features_grid = True
else:
high_cardinality_features_grid = False
if high_cardinality_features_grid:
high_cardinality_method_grid = high_cardinality_method
else:
high_cardinality_method_grid = None
learned_types = preprocess.dtypes.learent_dtypes
learned_types.drop(target, inplace=True)
float_type = 0
cat_type = 0
for i in preprocess.dtypes.learent_dtypes:
if 'float' in str(i):
float_type += 1
elif 'object' in str(i):
cat_type += 1
elif 'int' in str(i):
float_type += 1
#target transformation method
if transform_target is False:
transform_target_method_grid = None
else:
transform_target_method_grid = preprocess.pt_target.function_to_apply
"""
preprocessing ends here
"""
#reset pandas option
pd.reset_option("display.max_rows")
pd.reset_option("display.max_columns")
logger.info("Creating global containers")
#create an empty list for pickling later.
experiment__ = []
#create folds_shuffle_param
folds_shuffle_param = folds_shuffle
#create n_jobs_param
n_jobs_param = n_jobs
#create create_model_container
create_model_container = []
#create master_model_container
master_model_container = []
#create display container
display_container = []
#create logging parameter
logging_param = log_experiment
#create exp_name_log param incase logging is False
exp_name_log = 'no_logging'
#create an empty log_plots_param
if log_plots:
log_plots_param = True
else:
log_plots_param = False
# create target param
target_param = target
# create gpu param
gpu_param = use_gpu
#sample estimator
if sample_estimator is None:
model = LinearRegression(n_jobs=n_jobs_param)
else:
model = sample_estimator
model_name = str(model).split("(")[0]
if 'CatBoostRegressor' in model_name:
model_name = 'CatBoostRegressor'
#creating variables to be used later in the function
X = data.drop(target,axis=1)
y = data[target]
progress.value += 1
if sampling is True and data.shape[0] > 25000: #change back to 25000
split_perc = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99]
split_perc_text = ['10%','20%','30%','40%','50%','60%', '70%', '80%', '90%', '100%']
split_perc_tt = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99]
split_perc_tt_total = []
split_percent = []
metric_results = []
metric_name = []
counter = 0
for i in split_perc:
progress.value += 1
t0 = time.time()
'''
MONITOR UPDATE STARTS
'''
perc_text = split_perc_text[counter]
monitor.iloc[1,1:] = 'Fitting Model on ' + perc_text + ' sample'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
X_, X__, y_, y__ = train_test_split(X, y, test_size=1-i, random_state=seed, shuffle=data_split_shuffle)
X_train, X_test, y_train, y_test = train_test_split(X_, y_, test_size=1-train_size, random_state=seed, shuffle=data_split_shuffle)
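# Two-stage split while sampling: first hold out an i-fraction of the full data, then split
# that sample into train and test using the same train_size, seed and shuffle setting as the
# final split.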
model.fit(X_train,y_train)
pred_ = model.predict(X_test)
r2 = metrics.r2_score(y_test,pred_)
metric_results.append(r2)
metric_name.append('R2')
split_percent.append(i)
t1 = time.time()
'''
Time calculation begins
'''
tt = t1 - t0
total_tt = tt / i
split_perc_tt.pop(0)
for remain in split_perc_tt:
ss = total_tt * remain
split_perc_tt_total.append(ss)
ttt = sum(split_perc_tt_total) / 60
ttt = np.around(ttt, 2)
if ttt < 1:
ttt = str(np.around((ttt * 60), 2))
ETC = ttt + ' Seconds Remaining'
else:
ttt = str (ttt)
ETC = ttt + ' Minutes Remaining'
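# ETC is a rough extrapolation: the elapsed time for the current sample fraction is scaled
# to the remaining fractions and reported in seconds or minutes.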
monitor.iloc[2,1:] = ETC
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
Time calculation Ends
'''
split_perc_tt_total = []
counter += 1
model_results = pd.DataFrame({'Sample' : split_percent, 'Metric' : metric_results, 'Metric Name': metric_name})
fig = px.line(model_results, x='Sample', y='Metric', color='Metric Name', line_shape='linear', range_y = [0,1])
fig.update_layout(plot_bgcolor='rgb(245,245,245)')
title= str(model_name) + ' Metric and Sample %'
fig.update_layout(title={'text': title, 'y':0.95,'x':0.45,'xanchor': 'center','yanchor': 'top'})
fig.show()
monitor.iloc[1,1:] = 'Waiting for input'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
print('Please Enter the sample % of data you would like to use for modeling. Example: Enter 0.3 for 30%.')
print('Press Enter if you would like to use 100% of the data.')
print(' ')
sample_size = input("Sample Size: ")
if sample_size == '' or sample_size == '1':
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1-train_size, random_state=seed, shuffle=data_split_shuffle)
else:
sample_n = float(sample_size)
X_selected, X_discard, y_selected, y_discard = train_test_split(X, y, test_size=1-sample_n,
random_state=seed, shuffle=data_split_shuffle)
X_train, X_test, y_train, y_test = train_test_split(X_selected, y_selected, test_size=1-train_size,
random_state=seed, shuffle=data_split_shuffle)
else:
monitor.iloc[1,1:] = 'Splitting Data'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1-train_size, random_state=seed, shuffle=data_split_shuffle)
progress.value += 1
'''
Final display Starts
'''
clear_output()
if verbose:
print(' ')
if profile:
print('Setup Successfully Completed. Loading Profile Now... Please Wait!')
else:
if verbose:
print('Setup Successfully Completed.')
functions = pd.DataFrame ( [ ['session_id', seed ],
['Transform Target ', transform_target],
['Transform Target Method', transform_target_method_grid],
['Original Data', data_before_preprocess.shape ],
['Missing Values ', missing_flag],
['Numeric Features ', str(float_type) ],
['Categorical Features ', str(cat_type) ],
['Ordinal Features ', ordinal_features_grid],
['High Cardinality Features ', high_cardinality_features_grid],
['High Cardinality Method ', high_cardinality_method_grid],
['Sampled Data', '(' + str(X_train.shape[0] + X_test.shape[0]) + ', ' + str(data_before_preprocess.shape[1]) + ')' ],
['Transformed Train Set', X_train.shape ],
['Transformed Test Set',X_test.shape ],
['Numeric Imputer ', numeric_imputation],
['Categorical Imputer ', categorical_imputation],
['Normalize ', normalize ],
['Normalize Method ', normalize_grid ],
['Transformation ', transformation ],
['Transformation Method ', transformation_grid ],
['PCA ', pca],
['PCA Method ', pca_method_grid],
['PCA Components ', pca_components_grid],
['Ignore Low Variance ', ignore_low_variance],
['Combine Rare Levels ', combine_rare_levels],
['Rare Level Threshold ', rare_level_threshold_grid],
['Numeric Binning ', numeric_bin_grid],
['Remove Outliers ', remove_outliers],
['Outliers Threshold ', outliers_threshold_grid],
['Remove Multicollinearity ', remove_multicollinearity],
['Multicollinearity Threshold ', multicollinearity_threshold_grid],
['Clustering ', create_clusters],
['Clustering Iteration ', cluster_iter_grid],
['Polynomial Features ', polynomial_features],
['Polynomial Degree ', polynomial_degree_grid],
['Trigonometry Features ', trigonometry_features],
['Polynomial Threshold ', polynomial_threshold_grid],
['Group Features ', group_features_grid],
['Feature Selection ', feature_selection],
['Features Selection Threshold ', feature_selection_threshold_grid],
['Feature Interaction ', feature_interaction],
['Feature Ratio ', feature_ratio],
['Interaction Threshold ', interaction_threshold_grid],
], columns = ['Description', 'Value'] )
functions_ = functions.style.apply(highlight_max)
if verbose:
if html_param:
display(functions_)
else:
print(functions_.data)
if profile:
try:
import pandas_profiling
pf = pandas_profiling.ProfileReport(data_before_preprocess)
clear_output()
display(pf)
except:
print('Data Profiler Failed. No output to show, please continue with Modeling.')
'''
Final display Ends
'''
#log into experiment
experiment__.append(('Regression Setup Config', functions))
experiment__.append(('X_training Set', X_train))
experiment__.append(('y_training Set', y_train))
experiment__.append(('X_test Set', X_test))
experiment__.append(('y_test Set', y_test))
experiment__.append(('Transformation Pipeline', prep_pipe))
try:
experiment__.append(('Target Inverse Transformer', target_inverse_transformer))
except:
pass
#end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
if logging_param:
logger.info("Logging experiment in MLFlow")
import mlflow
from pathlib import Path
if experiment_name is None:
exp_name_ = 'reg-default-name'
else:
exp_name_ = experiment_name
URI = secrets.token_hex(nbytes=4)
exp_name_log = exp_name_
try:
mlflow.create_experiment(exp_name_log)
except:
pass
#mlflow logging
mlflow.set_experiment(exp_name_log)
run_name_ = 'Session Initialized ' + str(USI)
with mlflow.start_run(run_name=run_name_) as run:
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
k = functions.copy()
k.set_index('Description',drop=True,inplace=True)
kdict = k.to_dict()
params = kdict.get('Value')
mlflow.log_params(params)
#set tag of compare_models
mlflow.set_tag("Source", "setup")
import secrets
URI = secrets.token_hex(nbytes=4)
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
# Log the transformation pipeline
logger.info("SubProcess save_model() called ==================================")
save_model(prep_pipe, 'Transformation Pipeline', verbose=False)
logger.info("SubProcess save_model() end ==================================")
mlflow.log_artifact('Transformation Pipeline' + '.pkl')
os.remove('Transformation Pipeline.pkl')
# Log pandas profile
if log_profile:
import pandas_profiling
pf = pandas_profiling.ProfileReport(data_before_preprocess)
pf.to_file("Data Profile.html")
mlflow.log_artifact("Data Profile.html")
os.remove("Data Profile.html")
clear_output()
display(functions_)
# Log training and testing set
if log_data:
X_train.join(y_train).to_csv('Train.csv')
X_test.join(y_test).to_csv('Test.csv')
mlflow.log_artifact("Train.csv")
mlflow.log_artifact("Test.csv")
os.remove('Train.csv')
os.remove('Test.csv')
logger.info("create_model_container: " + str(len(create_model_container)))
logger.info("master_model_container: " + str(len(master_model_container)))
logger.info("display_container: " + str(len(display_container)))
logger.info("setup() succesfully completed......................................")
return X, y, X_train, X_test, y_train, y_test, seed, prep_pipe, target_inverse_transformer,\
experiment__, folds_shuffle_param, n_jobs_param, html_param, create_model_container,\
master_model_container, display_container, exp_name_log, logging_param, log_plots_param, USI,\
data_before_preprocess, target_param
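# A minimal usage sketch for setup() (hypothetical example, following the dataset used in the
# compare_models() docstring below):
# >>> from pycaret.datasets import get_data
# >>> boston = get_data('boston')
# >>> exp = setup(data = boston, target = 'medv', session_id = 123, silent = True)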
def compare_models(exclude = None,
include = None, #added in pycaret==2.0.0
fold = 10,
round = 4,
sort = 'R2',
n_select = 1, #added in pycaret==2.0.0
budget_time = 0, #added in pycaret==2.1.0
turbo = True,
verbose = True): #added in pycaret==2.0.0
"""
This function trains all the models available in the model library and scores them
using K-fold cross validation. The output prints a score grid with MAE, MSE,
RMSE, R2, RMSLE and MAPE (averaged across folds), with the number of folds determined by the fold parameter.
This function returns the best model based on the metric defined in the sort parameter.
To select the top N models, use the n_select parameter, which is set to 1 by default.
When n_select > 1, a list of trained model objects is returned.
When turbo is set to True, 'kr', 'ard' and 'mlp' are excluded due to their longer
training times. By default, the turbo param is set to True.
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> experiment_name = setup(data = boston, target = 'medv')
>>> best_model = compare_models()
This will return the averaged score grid of all models except 'kr', 'ard'
and 'mlp'. When turbo param is set to False, all models including 'kr',
'ard' and 'mlp' are used, but this may result in longer training times.
>>> best_model = compare_models(exclude = ['knn','gbr'], turbo = False)
This will return a comparison of all models except K Nearest Neighbour and
Gradient Boosting Regressor.
>>> best_model = compare_models(exclude = ['knn','gbr'] , turbo = True)
This will return a comparison of all models except K Nearest Neighbour,
Gradient Boosting Regressor, Kernel Ridge Regressor, Automatic Relevance
Determination and Multi Level Perceptron.
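A hypothetical example, assuming setup() has already been run as above:
>>> top3 = compare_models(n_select = 3, sort = 'MAE')
This would return a list of the three best models ranked by Mean Absolute Error.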
Parameters
----------
exclude: list of strings, default = None
In order to omit certain models from the comparison, model IDs can be passed as
a list of strings in the exclude param.
include: list of strings, default = None
In order to run only certain models for the comparison, model IDs can be
passed as a list of strings in the include param.
fold: integer, default = 10
Number of folds to be used in Kfold CV. Must be at least 2.
round: integer, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
sort: string, default = 'R2'
The scoring measure specified is used for sorting the average score grid.
Other options are 'MAE', 'MSE', 'RMSE', 'RMSLE' and 'MAPE'.
n_select: int, default = 1
Number of top models to return. Use a negative value for bottom selection;
for example, n_select = -3 returns the bottom 3 models.
budget_time: int or float, default = 0
If set above 0, will terminate execution of the function after budget_time minutes have
passed and return results up to that point.
turbo: Boolean, default = True
When turbo is set to True, it excludes estimators that have longer
training times.
verbose: Boolean, default = True
Score grid is not printed when verbose is set to False.
Returns
-------
score_grid
A table containing the scores of the model across the kfolds.
Scoring metrics used are MAE, MSE, RMSE, R2, RMSLE and MAPE
Mean and standard deviation of the scores across the folds is
also returned.
Warnings
--------
- compare_models(), though attractive, might be time consuming with large
datasets. By default, turbo is set to True, which excludes models that
have longer training times. Changing the turbo parameter to False may result
in very high training times with datasets where the number of samples exceeds
10,000.
"""
'''
ERROR HANDLING STARTS HERE
'''
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing compare_models()")
logger.info("""compare_models(exclude={}, include={}, fold={}, round={}, sort={}, n_select={}, turbo={}, verbose={})""".\
format(str(exclude), str(include), str(fold), str(round), str(sort), str(n_select), str(turbo), str(verbose)))
logger.info("Checking exceptions")
#exception checking
import sys
#checking error for exclude (string)
available_estimators = ['lr', 'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard', 'par',
'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf', 'et', 'ada', 'gbr',
'mlp', 'xgboost', 'lightgbm', 'catboost']
if exclude is not None:
for i in exclude:
if i not in available_estimators:
sys.exit('(Value Error): Estimator Not Available. Please see docstring for list of available estimators.')
if include is not None:
for i in include:
if i not in available_estimators:
sys.exit('(Value Error): Estimator Not Available. Please see docstring for list of available estimators.')
#include and exclude together check
if include is not None:
if exclude is not None:
sys.exit('(Type Error): Cannot use exclude parameter when include is used to compare models.')
#checking fold parameter
if type(fold) is not int:
sys.exit('(Type Error): Fold parameter only accepts integer value.')
#checking round parameter
if type(round) is not int:
sys.exit('(Type Error): Round parameter only accepts integer value.')
#checking n_select parameter
if type(n_select) is not int:
sys.exit('(Type Error): n_select parameter only accepts integer value.')
#checking budget_time parameter
if type(budget_time) is not int and type(budget_time) is not float:
sys.exit('(Type Error): budget_time parameter only accepts integer or float values.')
#checking sort parameter
allowed_sort = ['MAE', 'MSE', 'RMSE', 'R2', 'RMSLE', 'MAPE']
if sort not in allowed_sort:
sys.exit('(Value Error): Sort method not supported. See docstring for list of available parameters.')
'''
ERROR HANDLING ENDS HERE
'''
logger.info("Preloading libraries")
#pre-load libraries
import pandas as pd
import time, datetime
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
pd.set_option('display.max_columns', 500)
logger.info("Preparing display monitor")
#progress bar
if exclude is None:
len_of_exclude = 0
else:
len_of_exclude = len(exclude)
if turbo:
len_mod = 22 - len_of_exclude
else:
len_mod = 25 - len_of_exclude
#n_select param
if type(n_select) is list:
n_select_num = len(n_select)
else:
n_select_num = abs(n_select)
if n_select_num > len_mod:
n_select_num = len_mod
if include is not None:
wl = len(include)
bl = len_of_exclude
len_mod = wl - bl
if include is not None:
opt = 10
else:
opt = 30
#display
progress = ipw.IntProgress(value=0, min=0, max=(fold*len_mod)+opt+n_select_num, step=1 , description='Processing: ')
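# Progress bar upper bound: one step per model per fold (fold*len_mod), plus a fixed
# overhead (opt) and n_select_num additional steps.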
master_display = pd.DataFrame(columns=['Model', 'MAE','MSE','RMSE', 'R2', 'RMSLE', 'MAPE', 'TT (Sec)'])
#display monitor only when html_param is set to True
if verbose:
if html_param:
display(progress)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
['Status' , '. . . . . . . . . . . . . . . . . .' , 'Loading Dependencies' ],
['Estimator' , '. . . . . . . . . . . . . . . . . .' , 'Compiling Library' ],
['ETC' , '. . . . . . . . . . . . . . . . . .', 'Calculating ETC'] ],
columns=['', ' ', ' ']).set_index('')
#display only when html_param is set to True
if verbose:
if html_param:
display(monitor, display_id = 'monitor')
display_ = display(master_display, display_id=True)
display_id = display_.display_id
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
#general dependencies
import numpy as np
import random
from sklearn import metrics
from sklearn.model_selection import KFold
import pandas.io.formats.style
logger.info("Copying training dataset")
#Storing X_train and y_train in data_X and data_y parameter
data_X = X_train.copy()
data_y = y_train.copy()
#reset index
data_X.reset_index(drop=True, inplace=True)
data_y.reset_index(drop=True, inplace=True)
progress.value += 1
logger.info("Importing libraries")
#import sklearn dependencies
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lars
from sklearn.linear_model import LassoLars
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import BayesianRidge
from sklearn.linear_model import ARDRegression
from sklearn.linear_model import PassiveAggressiveRegressor
from sklearn.linear_model import RANSACRegressor
from sklearn.linear_model import TheilSenRegressor
from sklearn.linear_model import HuberRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
try:
import lightgbm as lgb
except:
pass
logger.info("LightGBM import failed")
progress.value += 1
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[1,1:] = 'Loading Estimator'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
logger.info("Importing untrained models")
#creating model object
lr = LinearRegression(n_jobs=n_jobs_param)
lasso = Lasso(random_state=seed)
ridge = Ridge(random_state=seed)
en = ElasticNet(random_state=seed)
lar = Lars()
llar = LassoLars()
omp = OrthogonalMatchingPursuit()
br = BayesianRidge()
ard = ARDRegression()
par = PassiveAggressiveRegressor(random_state=seed)
ransac = RANSACRegressor(min_samples=0.5, random_state=seed)
tr = TheilSenRegressor(random_state=seed, n_jobs=n_jobs_param)
huber = HuberRegressor()
kr = KernelRidge()
svm = SVR()
knn = KNeighborsRegressor(n_jobs=n_jobs_param)
dt = DecisionTreeRegressor(random_state=seed)
rf = RandomForestRegressor(random_state=seed, n_jobs=n_jobs_param)
et = ExtraTreesRegressor(random_state=seed, n_jobs=n_jobs_param)
ada = AdaBoostRegressor(random_state=seed)
gbr = GradientBoostingRegressor(random_state=seed)
mlp = MLPRegressor(random_state=seed)
xgboost = XGBRegressor(random_state=seed, n_jobs=n_jobs_param, verbosity=0)
lightgbm = lgb.LGBMRegressor(random_state=seed, n_jobs=n_jobs_param)
catboost = CatBoostRegressor(random_state=seed, silent = True, thread_count=n_jobs_param)
logger.info("Import successful")
progress.value += 1
model_dict = {'Linear Regression' : 'lr',
'Lasso Regression' : 'lasso',
'Ridge Regression' : 'ridge',
'Elastic Net' : 'en',
'Least Angle Regression' : 'lar',
'Lasso Least Angle Regression' : 'llar',
'Orthogonal Matching Pursuit' : 'omp',
'Bayesian Ridge' : 'br',
'Automatic Relevance Determination' : 'ard',
'Passive Aggressive Regressor' : 'par',
'Random Sample Consensus' : 'ransac',
'TheilSen Regressor' : 'tr',
'Huber Regressor' : 'huber',
'Kernel Ridge' : 'kr',
'Support Vector Machine' : 'svm',
'K Neighbors Regressor' : 'knn',
'Decision Tree' : 'dt',
'Random Forest' : 'rf',
'Extra Trees Regressor' : 'et',
'AdaBoost Regressor' : 'ada',
'Gradient Boosting Regressor' : 'gbr',
'Multi Level Perceptron' : 'mlp',
'Extreme Gradient Boosting' : 'xgboost',
'Light Gradient Boosting Machine' : 'lightgbm',
'CatBoost Regressor' : 'catboost'}
model_library = [lr, lasso, ridge, en, lar, llar, omp, br, ard, par, ransac, tr, huber, kr,
svm, knn, dt, rf, et, ada, gbr, mlp, xgboost, lightgbm, catboost]
model_names = ['Linear Regression',
'Lasso Regression',
'Ridge Regression',
'Elastic Net',
'Least Angle Regression',
'Lasso Least Angle Regression',
'Orthogonal Matching Pursuit',
'Bayesian Ridge',
'Automatic Relevance Determination',
'Passive Aggressive Regressor',
'Random Sample Consensus',
'TheilSen Regressor',
'Huber Regressor',
'Kernel Ridge',
'Support Vector Machine',
'K Neighbors Regressor',
'Decision Tree',
'Random Forest',
'Extra Trees Regressor',
'AdaBoost Regressor',
'Gradient Boosting Regressor',
'Multi Level Perceptron',
'Extreme Gradient Boosting',
'Light Gradient Boosting Machine',
'CatBoost Regressor']
#checking for exclude models
model_library_str = ['lr', 'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard',
'par', 'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf',
'et', 'ada', 'gbr', 'mlp', 'xgboost', 'lightgbm', 'catboost']
model_library_str_ = ['lr', 'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard',
'par', 'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf',
'et', 'ada', 'gbr', 'mlp', 'xgboost', 'lightgbm', 'catboost']
if exclude is not None:
if turbo:
internal_exclude = ['kr', 'ard', 'mlp']
compiled_exclude = exclude + internal_exclude
exclude = list(set(compiled_exclude))
else:
exclude = exclude
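# With turbo=True the internal exclusions are merged in as well, e.g. exclude=['knn']
# effectively excludes {'knn', 'kr', 'ard', 'mlp'}; a set is used, so duplicates are dropped.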
for i in exclude:
model_library_str_.remove(i)
si = []
for i in model_library_str_:
s = model_library_str.index(i)
si.append(s)
model_library_ = []
model_names_= []
for i in si:
model_library_.append(model_library[i])
model_names_.append(model_names[i])
model_library = model_library_
model_names = model_names_
if exclude is None and turbo is True:
model_library = [lr, lasso, ridge, en, lar, llar, omp, br, par, ransac, tr, huber,
svm, knn, dt, rf, et, ada, gbr, xgboost, lightgbm, catboost]
model_names = ['Linear Regression',
'Lasso Regression',
'Ridge Regression',
'Elastic Net',
'Least Angle Regression',
'Lasso Least Angle Regression',
'Orthogonal Matching Pursuit',
'Bayesian Ridge',
'Passive Aggressive Regressor',
'Random Sample Consensus',
'TheilSen Regressor',
'Huber Regressor',
'Support Vector Machine',
'K Neighbors Regressor',
'Decision Tree',
'Random Forest',
'Extra Trees Regressor',
'AdaBoost Regressor',
'Gradient Boosting Regressor',
'Extreme Gradient Boosting',
'Light Gradient Boosting Machine',
'CatBoost Regressor']
#checking for include models
if include is not None:
model_library = []
model_names = []
for i in include:
if i == 'lr':
model_library.append(lr)
model_names.append('Linear Regression')
elif i == 'lasso':
model_library.append(lasso)
model_names.append('Lasso Regression')
elif i == 'ridge':
model_library.append(ridge)
model_names.append('Ridge Regression')
elif i == 'en':
model_library.append(en)
model_names.append('Elastic Net')
elif i == 'lar':
model_library.append(lar)
model_names.append('Least Angle Regression')
elif i == 'llar':
model_library.append(llar)
model_names.append('Lasso Least Angle Regression')
elif i == 'omp':
model_library.append(omp)
model_names.append('Orthogonal Matching Pursuit')
elif i == 'br':
model_library.append(br)
model_names.append('Bayesian Ridge')
elif i == 'ard':
model_library.append(ard)
model_names.append('Automatic Relevance Determination')
elif i == 'par':
model_library.append(par)
model_names.append('Passive Aggressive Regressor')
elif i == 'ransac':
model_library.append(ransac)
model_names.append('Random Sample Consensus')
elif i == 'tr':
model_library.append(tr)
model_names.append('TheilSen Regressor')
elif i == 'huber':
model_library.append(huber)
model_names.append('Huber Regressor')
elif i == 'kr':
model_library.append(kr)
model_names.append('Kernel Ridge')
elif i == 'svm':
model_library.append(svm)
model_names.append('Support Vector Machine')
elif i == 'knn':
model_library.append(knn)
model_names.append('K Neighbors Regressor')
elif i == 'dt':
model_library.append(dt)
model_names.append('Decision Tree')
elif i == 'rf':
model_library.append(rf)
model_names.append('Random Forest')
elif i == 'et':
model_library.append(et)
model_names.append('Extra Trees Regressor')
elif i == 'ada':
model_library.append(ada)
model_names.append('AdaBoost Regressor')
elif i == 'gbr':
model_library.append(gbr)
model_names.append('Gradient Boosting Regressor')
elif i == 'mlp':
model_library.append(mlp)
model_names.append('Multi Layer Perceptron')
elif i == 'xgboost':
model_library.append(xgboost)
model_names.append('Extreme Gradient Boosting')
elif i == 'lightgbm':
model_library.append(lightgbm)
model_names.append('Light Gradient Boosting Machine')
elif i == 'catboost':
model_library.append(catboost)
model_names.append('CatBoost Regressor')
progress.value += 1
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[1,1:] = 'Initializing CV'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
#cross validation setup starts here
logger.info("Defining folds")
kf = KFold(fold, random_state=seed, shuffle=folds_shuffle_param)
logger.info("Declaring metric variables")
score_mae =np.empty((0,0))
score_mse =np.empty((0,0))
score_rmse =np.empty((0,0))
score_rmsle =np.empty((0,0))
score_r2 =np.empty((0,0))
score_mape =np.empty((0,0))
score_training_time=np.empty((0,0))
avgs_mae =np.empty((0,0))
avgs_mse =np.empty((0,0))
avgs_rmse =np.empty((0,0))
avgs_rmsle =np.empty((0,0))
avgs_r2 =np.empty((0,0))
avgs_mape =np.empty((0,0))
avgs_training_time=np.empty((0,0))
def calculate_mape(actual, prediction):
# mask out zero actuals before dividing to avoid division-by-zero warnings
mask = actual != 0
return (np.fabs(actual[mask] - prediction[mask]) / actual[mask]).mean()
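# Worked example (hypothetical numbers): for actual = [0, 2, 4] and
# prediction = [1, 1, 5], the zero actual is masked out and the function
# returns mean(|2-1|/2, |4-5|/4) = 0.375.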
#create URI (before loop)
import secrets
URI = secrets.token_hex(nbytes=4)
name_counter = 0
model_store = []
total_runtime_start = time.time()
total_runtime = 0
over_time_budget = False
if budget_time and budget_time > 0:
logger.info(f"Time budget is {budget_time} minutes")
for model in model_library:
logger.info("Initializing " + str(model_names[name_counter]))
#run_time
runtime_start = time.time()
progress.value += 1
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[2,1:] = model_names[name_counter]
monitor.iloc[3,1:] = 'Calculating ETC'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
fold_num = 1
model_store_by_fold = []
for train_i , test_i in kf.split(data_X,data_y):
logger.info("Initializing Fold " + str(fold_num))
progress.value += 1
t0 = time.time()
total_runtime += (t0 - total_runtime_start)/60
logger.info(f"Total runtime is {total_runtime} minutes")
over_time_budget = budget_time and budget_time > 0 and total_runtime > budget_time
if over_time_budget:
logger.info(f"Total runtime {total_runtime} is over time budget by {total_runtime - budget_time}, breaking loop")
break
total_runtime_start = t0
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[1,1:] = 'Fitting Fold ' + str(fold_num) + ' of ' + str(fold)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
Xtrain,Xtest = data_X.iloc[train_i], data_X.iloc[test_i]
ytrain,ytest = data_y.iloc[train_i], data_y.iloc[test_i]
time_start=time.time()
logger.info("Fitting Model")
model_store_by_fold.append(model.fit(Xtrain,ytrain))
logger.info("Evaluating Metrics")
time_end=time.time()
pred_ = model.predict(Xtest)
try:
pred_ = target_inverse_transformer.inverse_transform(np.array(pred_).reshape(-1,1))
ytest = target_inverse_transformer.inverse_transform(np.array(ytest).reshape(-1,1))
pred_ = np.nan_to_num(pred_)
ytest = np.nan_to_num(ytest)
except Exception:
logger.info("No inverse transformer found")
logger.info("Compiling Metrics")
mae = metrics.mean_absolute_error(ytest,pred_)
mse = metrics.mean_squared_error(ytest,pred_)
rmse = np.sqrt(mse)
r2 = metrics.r2_score(ytest,pred_)
rmsle = np.sqrt(np.mean(np.power(np.log(np.array(abs(pred_))+1) - np.log(np.array(abs(ytest))+1), 2)))
mape = calculate_mape(ytest,pred_)
training_time=time_end-time_start
score_mae = np.append(score_mae,mae)
score_mse = np.append(score_mse,mse)
score_rmse = np.append(score_rmse,rmse)
score_rmsle = np.append(score_rmsle,rmsle)
score_r2 =np.append(score_r2,r2)
score_mape = np.append(score_mape,mape)
score_training_time=np.append(score_training_time,training_time)
'''
TIME CALCULATION SUB-SECTION STARTS HERE
'''
t1 = time.time()
tt = (t1 - t0) * (fold-fold_num) / 60
tt = np.around(tt, 2)
if tt < 1:
tt = str(np.around((tt * 60), 2))
ETC = tt + ' Seconds Remaining'
else:
tt = str (tt)
ETC = tt + ' Minutes Remaining'
fold_num += 1
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[3,1:] = ETC
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
if over_time_budget:
break
model_store.append(model_store_by_fold[0])
logger.info("Calculating mean and std")
avgs_mae = np.append(avgs_mae,np.mean(score_mae))
avgs_mse = np.append(avgs_mse,np.mean(score_mse))
avgs_rmse = np.append(avgs_rmse,np.mean(score_rmse))
avgs_rmsle = np.append(avgs_rmsle,np.mean(score_rmsle))
avgs_r2 = np.append(avgs_r2,np.mean(score_r2))
avgs_mape = np.append(avgs_mape,np.mean(score_mape))
avgs_training_time = np.append(avgs_training_time,np.mean(score_training_time))
logger.info("Creating metrics dataframe")
compare_models_ = pd.DataFrame({'Model':model_names[name_counter], 'MAE':avgs_mae, 'MSE':avgs_mse,
'RMSE':avgs_rmse, 'R2':avgs_r2, 'RMSLE':avgs_rmsle, 'MAPE':avgs_mape, 'TT (Sec)':avgs_training_time})
master_display = pd.concat([master_display, compare_models_],ignore_index=True)
master_display = master_display.round(round)
if sort == 'R2':
master_display = master_display.sort_values(by=sort,ascending=False)
else:
master_display = master_display.sort_values(by=sort,ascending=True)
master_display.reset_index(drop=True, inplace=True)
if verbose:
if html_param:
update_display(master_display, display_id = display_id)
#end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
"""
MLflow logging starts here
"""
if logging_param:
logger.info("Creating MLFlow logs")
import mlflow
from pathlib import Path
import os
run_name = model_names[name_counter]
with mlflow.start_run(run_name=run_name) as run:
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
params = model.get_params()
for i in list(params):
v = params.get(i)
if len(str(v)) > 250:
params.pop(i)
mlflow.log_params(params)
#set tag of compare_models
mlflow.set_tag("Source", "compare_models")
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
#Log top model metrics
mlflow.log_metric("MAE", avgs_mae[0])
mlflow.log_metric("MSE", avgs_mse[0])
mlflow.log_metric("RMSE", avgs_rmse[0])
mlflow.log_metric("R2", avgs_r2[0])
mlflow.log_metric("RMSLE", avgs_rmsle[0])
mlflow.log_metric("MAPE", avgs_mape[0])
mlflow.log_metric("TT", avgs_training_time[0])
# Log model and transformation pipeline
from copy import deepcopy
# get default conda env
from mlflow.sklearn import get_default_conda_env
default_conda_env = get_default_conda_env()
default_conda_env['name'] = str(exp_name_log) + '-env'
default_conda_env.get('dependencies').pop(-3)
dependencies = default_conda_env.get('dependencies')[-1]
from pycaret.utils import __version__
dep = 'pycaret==' + str(__version__())
dependencies['pip'] = [dep]
# define model signature
from mlflow.models.signature import infer_signature
signature = infer_signature(data_before_preprocess.drop([target_param], axis=1))
input_example = data_before_preprocess.drop([target_param], axis=1).iloc[0].to_dict()
# log model as sklearn flavor
prep_pipe_temp = deepcopy(prep_pipe)
prep_pipe_temp.steps.append(['trained model', model])
mlflow.sklearn.log_model(prep_pipe_temp, "model", conda_env = default_conda_env, signature = signature, input_example = input_example)
del(prep_pipe_temp)
score_mae =np.empty((0,0))
score_mse =np.empty((0,0))
score_rmse =np.empty((0,0))
score_rmsle =np.empty((0,0))
score_r2 =np.empty((0,0))
score_mape =np.empty((0,0))
score_training_time=np.empty((0,0))
avgs_mae = np.empty((0,0))
avgs_mse = np.empty((0,0))
avgs_rmse = np.empty((0,0))
avgs_rmsle = np.empty((0,0))
avgs_r2 = np.empty((0,0))
avgs_mape = np.empty((0,0))
avgs_training_time=np.empty((0,0))
name_counter += 1
progress.value += 1
def highlight_min(s):
if s.name=='R2': # R2: higher is better, so highlight the max
to_highlight = s == s.max()
else: # error metrics: lower is better, highlight the min
to_highlight = s == s.min()
return ['background-color: yellow' if v else '' for v in to_highlight]
def highlight_cols(s):
color = 'lightgrey'
return 'background-color: %s' % color
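# Note on the Styler calls below: `apply` passes each selected column (a Series)
# to highlight_min, while `applymap` styles the 'TT (Sec)' cells one value at a
# time via highlight_cols.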
compare_models_ = master_display.style.apply(highlight_min,subset=['MAE','MSE','RMSE','R2','RMSLE','MAPE'])\
.applymap(highlight_cols, subset = ['TT (Sec)'])
compare_models_ = compare_models_.set_precision(round)
compare_models_ = compare_models_.set_properties(**{'text-align': 'left'})
compare_models_ = compare_models_.set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
progress.value += 1
monitor.iloc[1,1:] = 'Compiling Final Model'
monitor.iloc[3,1:] = 'Almost Finished'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
sorted_model_names = list(compare_models_.data['Model'])
n_select = n_select if n_select <= len(sorted_model_names) else len(sorted_model_names)
if n_select < 0:
sorted_model_names = sorted_model_names[n_select:]
else:
sorted_model_names = sorted_model_names[:n_select]
model_store_final = []
logger.info("Finalizing top_n models")
logger.info("SubProcess create_model() called ==================================")
for i in sorted_model_names:
monitor.iloc[2,1:] = i
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
progress.value += 1
k = model_dict.get(i)
m = create_model(estimator=k, verbose = False, system=False, cross_validation=True)
model_store_final.append(m)
logger.info("SubProcess create_model() end ==================================")
if len(model_store_final) == 1:
model_store_final = model_store_final[0]
clear_output()
if verbose:
if html_param:
display(compare_models_)
else:
print(compare_models_.data)
| pd.reset_option("display.max_columns") | pandas.reset_option |
import pandas as pd
from pandas.core.frame import DataFrame
pd.options.display.max_rows=None
pd.options.display.max_columns=None
Actores = 'actores'
NombreArchivo = f'Base_de_datos_{Actores}.ods'
df_rows = pd.read_excel(NombreArchivo) #, index_col=0
df_rows2 = pd.read_excel(NombreArchivo, skiprows=range(0,1))
rows = df_rows.values.tolist()
df_cols = df_rows.T
cols = df_cols.values.tolist()
rows2 = df_rows2.values.tolist()
df_cols2 = df_rows2.T
cols2 = df_cols2.values.tolist()
# Define the function that works out which column the user wants to pick
def col_of_list(lis, print1, print2):
i = ''
n = 0
col_eleg_l = '^*:;)=/q$·¡ª!3·~¬€fhg@|}j{[d]+`^*!vv"g$fb$*^Çç¨_:;;' #imposible de acertar por el usuario
while col_eleg_l != i.lower():
if n >=1:
print('\nOPCIÓN NO VÁLIDA')
print(print1)
for caracteristica in lis[:-1]:
print(caracteristica, end=", ")
print(lis[-1])
columna_elegida = input(print2)
col_eleg_l = columna_elegida.lower()
for i in lis:
if col_eleg_l.lower() == i.lower():
break
n += 1
columna = lis.index(i)
return columna, col_eleg_l
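# Usage sketch (hypothetical arguments): col_of_list(rows[0], 'Available columns:',
# 'Which column do you want?: ') keeps prompting until the answer matches one of
# the headers in rows[0], then returns (column index, lowercased choice).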
repetir = 's'
lista_de_columnas_elegidas = [0, 2, 4]
while repetir == 's':
# Use the function
columna, col_eleg_l = col_of_list(rows[0], '\nDe estas características:',
'\n¿Qué característica quieres elegir?: ')
if columna not in lista_de_columnas_elegidas:
lista_de_columnas_elegidas.append(columna)
# Special case for when the user picks age or height, because they are numbers
if col_eleg_l == 'edad' or col_eleg_l == 'estatura' or col_eleg_l == 'numero de actor':
obj1 = float(input(f'\n¿Qué {col_eleg_l} mínima tiene la persona que buscas? (Decimales con punto y sin uds): '))
obj2 = float(input(f'¿Qué {col_eleg_l} máxima tiene la persona que buscas? (Decimales con punto y sin uds): '))
if obj1 > obj2:
obj1, obj2 = obj2, obj1
n2 = 1
n1 = 0
for i2 in cols2[columna]:
if obj1 > i2 or i2 > obj2:
rows.pop(n2-n1) # Drop the row that does not meet the requirement
rows2.pop(n2-n1-1)
n1 += 1
n2 += 1
# Special case for the remaining characteristics, which are not numbers
obj = '^*:;)=/q$·¡ª!3·~¬€fhg@|}j{[d]+`^*!vv"g$fb$*^Çç¨_:;;' #imposible de acertar por el usuario
if col_eleg_l == 'edad' or col_eleg_l == 'estatura' or col_eleg_l == 'numero de actor':
pass
else:
while obj.lower() not in [elementos.lower() for elementos in cols2[columna]]:
if col_eleg_l == 'sexo':
obj = input(f'¿A qué {col_eleg_l} pertenece la persona que buscas?: ')
elif col_eleg_l == 'ciudad':
obj = input(f'¿En qué {col_eleg_l} vive la persona que buscas?: ')
elif col_eleg_l == 'idiomas':
obj = input(f'¿Qué {col_eleg_l} sabe la persona que buscas?: ')
elif col_eleg_l == 'carnet de conducir':
obj = input(f'¿Tiene {col_eleg_l} la persona que buscas?: ')
else:
obj = input(f'¿Qué {col_eleg_l} tiene la persona que buscas?: ')
n4 = 1
n3 = 0
for i3 in cols2[columna]:
if obj.lower() != i3.lower():
rows.pop(n4-n3) # Drop the row that does not meet the requirement
rows2.pop(n4-n3-1)
n3 += 1
n4 += 1
df_rows = DataFrame(rows)
lista_de_columnas_elegidas = sorted(lista_de_columnas_elegidas) # sort the chosen dataframe columns
print(df_rows[lista_de_columnas_elegidas])
cols = DataFrame(rows).T.values.tolist()
cols2 = DataFrame(rows2).T.values.tolist()
repetir = input('\nQuieres repetir el proceso para reducir la selección? (S/N): ').lower()
while repetir != 's' and repetir != 'n':
repetir = input('\nQuieres repetir el proceso para reducir la selección? (S/N): ').lower()
retirada_manual = input('\nQuieres retirar algún actor de esta lista de forma manual? (S/N): ').lower()
while retirada_manual == 's':
while retirada_manual != 's' and retirada_manual != 'n':
retirada_manual = input('\nQuieres retirar algún actor de esta lista de forma manual? (S/N): ').lower()
if retirada_manual == 's':
while True:
num_actor_a_quitar = int(input('\nEl actor que quieres eliminar, ¿qué número ocupa en la lista?: '))
if num_actor_a_quitar == 0:
print('El 0 quitaría el título, elige otro número (el 1 es el primero)')
elif num_actor_a_quitar <= len(cols2[0]):
break
rows.pop(num_actor_a_quitar)
df_rows = | DataFrame(rows) | pandas.core.frame.DataFrame |
import numpy as np
from typing import Tuple, List
import cv2
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
from skimage.color import label2rgb
from skimage import img_as_ubyte
from skimage.measure import block_reduce
import pandas as pd
from .basic import saturation_rectified_intensity, fg_pts
def gmm(data: np.ndarray, n: int, method: str = 'default') -> \
Tuple[List[int], List[int]]:
"""
:param data: list of input
:param n: number of components
:param method: 'default' or 'bayesian'
:return: (labels, means)
"""
# To avoid error, the number of components should be
# no more than the length of input data.
noc = min(len(data), n)
if method.lower() == 'bayesian':
model = BayesianGaussianMixture(n_components=noc, random_state=123)
model.fit(data)
else:
model = GaussianMixture(n_components=noc, random_state=123)
model.fit(data)
return model.predict(data), model.means_
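# Usage sketch (hypothetical data): labels, means = gmm(np.array([[10.], [12.],
# [200.], [210.]]), n=2) assigns each intensity sample to one of two components
# and returns the fitted component means; pass method='bayesian' to use the
# BayesianGaussianMixture variant instead.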
def global_gmm(image: np.ndarray, mask: np.ndarray,
n: int, patch: Tuple[int, int]) -> \
Tuple[np.ndarray, np.ndarray, np.ndarray, pd.DataFrame]:
"""
:param image: grayscale image, 2D numpy array
:param mask: binary image, 2D binary numpy array
:param n: number of components
:param patch: size of block
:return: (3D numpy array, 2D numpy array, pandas data frame)
Solve GMM for the image histogram, iteratively find
the minimalist mean of GMM models and separate the
corresponding points.
"""
# Down sample the images and masks to reduce calculation.
image_down = block_reduce(image, patch, np.mean, 255)
mask_down = block_reduce(mask, patch, np.min)
mask_out = mask_down.copy()
global_mean = int(np.mean(image_down[mask_down > 0]))
label_out = np.zeros(mask_down.shape, dtype=np.uint8)
nol = 0 # The count of labels.
model_out = | pd.DataFrame(columns=[0, 1]) | pandas.DataFrame |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Modelagem em tempo real | COVID-19 no Brasil
--------------------------------------------
Ideias e modelagens desenvolvidas pela trinca:
. <NAME>
. <NAME>
. <NAME>
Esta modelagem possui as seguintes características:
a) NÃO seguimos modelos paramétricos => Não existem durante a epidemia dados
suficientes ou confiáveis para alimentar modelos epidemiológicos como a excelente
calaculadora http://gabgoh.github.io/COVID/index.html (ela serve para gerar cená-
rios e para modelar a epidemia DEPOIS que ela passar). Além disso, a natureza
exponencial das curvas as torna extremamente sensíveis aos parâmetros que a defi-
nem. Isso faz com que a confiabilidade preditiva desses modelos seja ilusória.
b) A evolução epidemia no Brasil começou depois da de outros países. Nossa mode-
lagem se apoia nesse fato. Com os dados disponíveis, procuramos no instante pre-
sente determinar quem estamos seguindo, ou seja, que países mais se pareceram
conosco passado o mesmo período de disseminação. A partir do que aconteceu nesses
países projetamos o que pode acontecer aqui.
c) Esta conta é refeita dia a dia. Dependendo de nossa competência em conter ou
não a disseminação do Covid-19 nos aproximaremos dos países que melhor ou pior
lidaram com a epidemia e a projeção refletirá essa similaridade.
d) As decisões de modelagem são indicadas no código com os zoinhos: # ◔◔ {...}
São pontos de partida para discutir a modelagem e propor alternativas.
"""
import datetime
import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
sns.set()
# no ipython usar este comando antes de rodar => %matplotlib osx
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
__author__ = "<NAME>" # codigo
__copyright__ = "Copyright 2020"
__license__ = "New BSD License"
__version__ = "1.5.2"
__email__ = "<EMAIL>"
__status__ = "Experimental"
def preparar_dados(p1, uf="SP", cidade=u"São Paulo"):
u"""Busca dados e organiza tabela "data" com os dados de referência para a
modelagem.
Fontes:
. Mundo: https://covid.ourworldindata.org
. Brasil: https://brasil.io
Retorna:
raw <DataFrame> | Série completa do número de mortes/dia por país, sem trans-
posição temporal
inicio <Series> | Referência dos indexes em raw para justapor o início das
curvas dos diferentes países
data <DataFrame> | Série de número de mortes/dia por país trazendo para o
zero (index 0) o primeiro dia em que ocorrem pelo menos p1 mortes
(ver macro parâmetros). Isto reduz a quantidade de países para o grupo
que está à frente ou pareado ao Brazil. A partir do index 0 é possível
comparar a evolução dos casos entre os países.
nbr <int> | Número de dias da série de dados para o Brasil
"""
# ◔◔ {we use daily deaths because they seem to be the most reliable figure}
raw = pd.read_csv("https://covid.ourworldindata.org/data/ecdc/new_deaths.csv").fillna(0.0)
# ◔◔ {the link below loads cumulative deaths; we don't use it because the running sum smooths the series}
# raw_soma = pd.read_csv("https://covid.ourworldindata.org/data/ecdc/total_deaths.csv").fillna(0.0)
# tempo = raw['date'] # ◔◔ {we don't use the dates}
raw = raw.drop(columns='date')
raw = raw.drop(columns='World')
# also keep the "official" figures for reference
para_oficial = raw['Brazil']
# under-reporting correction for Brazil:
sub, hip = estimar_subnotificacao('Brasil')
p4br = ((sub + raw['Brazil'].sum()) / raw['Brazil'].sum())
raw['Brasil'] = raw['Brazil'] * p4br
# the subs dict uses the same refs as keys => used for reporting in the charts
subs = {"Brasil": str(round(p4br, 1)) + " (" + hip + ")"}
# build the base for the "data" table
inicio = raw.ge(p1).idxmax() # ◔◔ {finds the index at which each country reaches p1}
data = pd.DataFrame({'Brasil':raw['Brasil'][inicio['Brasil']:]}).reset_index().drop(columns='index')
nbr = data.shape[0]
oficial = pd.DataFrame({'Brasil':para_oficial[inicio['Brasil']:]}).reset_index().drop(columns='index')
# dados Brasil
estados = [
'AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT', 'MS',
'MG', 'PA', 'PB', 'PR', 'PE', 'PI', 'RJ', 'RN', 'RS', 'RO', 'RR', 'SC',
'SP', 'SE', 'TO',
]
if uf not in estados or type(uf) is not str:
uf = "SP"
print(uf, u": UF inválida, usando 'SP'")
# ◔◔ {já baixamos filtrado para uf, mas pode se usar outros estados}
uf_data = pd.read_csv("https://brasil.io/dataset/covid19/caso?state="+uf+"&format=csv")
# adicionar dados da uf
uf_select = uf_data.loc[lambda df: df['place_type'] == "state", :]
uf_mortes = list(uf_select['deaths'].head(nbr + 1).fillna(0.0))
uf_mortes = [uf_mortes[i] - uf_mortes[i+1] for i in range(len(uf_mortes)-1)]
uf_mortes += [0 for _ in range(nbr-len(uf_mortes))] # corrigir tamanho
uf_mortes.reverse()
oficial[uf] = pd.Series(uf_mortes).values
sub_uf, hip_uf = estimar_subnotificacao(uf)
p4uf = ((sub_uf + pd.Series(uf_mortes).values.sum())/pd.Series(uf_mortes).values.sum())
data[uf] = pd.Series(uf_mortes).values * p4uf
subs[uf] = str(round(p4uf, 1)) + " (" + hip_uf + ")"
# adicionar dados da cidade
cidade_select = uf_data.loc[lambda df: df['city'] == cidade, :]
if cidade_select.shape[0] > 0:
cidade_mortes = list(cidade_select['deaths'].head(nbr + 1).fillna(0.0))
cidade_mortes = [cidade_mortes[i] - cidade_mortes[i+1] for i in range(len(cidade_mortes)-1)]
cidade_mortes += [0 for _ in range(nbr-len(cidade_mortes))] # corrigir tamanho
cidade_mortes.reverse()
if sum(cidade_mortes):
# subnotificacao para cidade => aprox pela do estado
oficial[cidade] = pd.Series(cidade_mortes).values
data[cidade] = pd.Series(cidade_mortes).values * p4uf
subs[cidade] = str(round(p4uf, 1)) + " (" + hip_uf + ")"
else:
subs["n/d"] = ""
print(u"AVISO: a cidade " + cidade + " não possui mortes confirmadas")
else:
subs["n/d"] = ""
print(u"AVISO: a cidade " + cidade + " não consta nos dados para esta UF")
print(u'Utilize uma das cidades disponíveis para o terceiro gráfico:')
for d in set(uf_data['city']):
print(d)
refs = list(subs.keys()) # as referencias validas...
# adicionar dados dos países à frente ou pareados ao Brasil
for k in inicio.keys():
if k == "Brasil": continue
if inicio[k] == 0 or inicio[k] > inicio["Brasil"]: continue
C = raw[k][inicio[k]:inicio[k]+nbr]
data[k] = C.values
return raw, inicio, data, nbr, subs, refs, oficial
def rodar_modelo(raw, inicio, data, nbr, p2, p3, ref, refs):
"""
Uses the prepared data to generate the series for visualization and the
projection of the epidemic's evolution.
Returns:
correlacionados <list>: Most correlated countries, used for the projection
calibrados <DataFrame>: Smoothed series of deaths per day with the data of ref
and of the correlated countries
projetado <Array>: Estimated series for the evolution of the epidemic in ref
infos <dict>: Information about the estimated peak of the epidemic
"""
# ◔◔ {We chose not to smooth the data before computing the correlation. We know
# the quality of the reported data varies, but we assume the noise is random,
# so no smoothing is needed for the correlation to be valid. On the contrary,
# the "raw" correlation is the most plausible one}
# ◔◔ {but if you think it is worth smoothing first, use the code below}
# smoothing of the reported death counts (moving average)
# data = data.rolling(5).mean()
try: data = data.drop(columns='Brazil')
except: pass
# calcular a matriz de correlações:
pearson = data.corr()
# ◔◔ {o default do método usa a correlação de Pearson, cf. ref abaixo}
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.corr.html
# ◔◔ { não incluir os casos locais para evitar endogeneidade}
out = refs # nao misturar com os demais cortes locais
# selecionar os p2 países que melhor se correlacionam com a ref
correlacionados = [_ for _ in pearson[ref].sort_values(ascending=False).keys() if _ not in out][:p2]
# criar tabela, começa com dados da ref
calibrados = pd.DataFrame({ref:data[ref]})
# preencher com os dados dos países correlacionados
for k in correlacionados:
# ◔◔ {pega os dados em raw pq agora usaremos todos os dados disponíveis para o país}
C = raw[k][inicio[k]:]
additional = pd.DataFrame({k: C.values}) # array
calibrados = pd.concat([calibrados, additional], axis=1)
# ◔◔ {aqui usamos um alisamento p3 de dias para deixar a visualização melhor}
calibrados = calibrados.rolling(p3).mean()
# ◔◔ {the projection uses the smoothed data}
# ◔◔ {how the projection is built:
# 1. each correlated country gets a weight, proportional to how correlated it is
# .. the weights sum to 1
# .. the more correlated, the larger the weight }
pesos = [pearson[ref][c] for c in correlacionados] # melhor corr pesa mais
pesos = [pesos[i]/sum(pesos) for i in range(len(pesos))] # pesos normalizados
pesos = dict(zip(correlacionados, pesos)) # num dict para facilitar
# proj <list>: vai ter ao final o tamanho da maior serie em calibrados
proj = [np.nan for _ in range(nbr)] # começa com nan onde já temos os dados da ref
proj[-1] = calibrados[ref][nbr - 1] # primeiro valor coincide com último de ref
# será a partir daí que começa a projeção
# ◔◔ {a projeção segue dia a dia as variações dos países correlacionado}
for d in range(nbr, calibrados.shape[0]):
x = 0 # incremento estimado para o dia
for c in correlacionados:
if not np.isnan(calibrados[c][d]):
# adiciona o incremento % do país ponderado por seu peso
x += (calibrados[c][d]/calibrados[c][d-1]) * pesos[c]
else:
# ◔◔ {qdo acabam os dados de um país ele pára de influenciar a taxa}
x += 1 * pesos[c]
# print(d, c, x)
# a série da projeção é construída aplicando o incremento estimado ao dia anterior
proj.append(proj[-1] * x)
# projetado <Array>
projetado = np.array(proj)
# ◔◔ {informações adicionais}
# pico => valor máximo da série projetada
pico = np.nan_to_num(projetado).max() # float
# mortes valor absoluto
mortes_no_pico = str(int(pico)) # str
ix_do_pico = proj.index(np.nan_to_num(projetado).max()) # int => index
# dia em que acontece o pico [! soma 1 no index pq projetado sobrepoe o primeiro valor]
dia_do_pico = str(datetime.datetime.now() + datetime.timedelta(days=ix_do_pico-nbr+1))[:10] # str
# no caso do pico já ter passado
if calibrados[ref].max() > pico:
pico = calibrados[ref].max()
mortes_no_pico = str(int(pico))
ix_do_pico = list(calibrados[ref]).index(pico)
dia_do_pico = str(datetime.datetime.now() + datetime.timedelta(days=ix_do_pico-nbr))[:10] # str
# mortes totais: hoje mais tres semanas
ix_hoje = list(calibrados[ref]).index(calibrados[ref][nbr - 1])
mortes_totais = {
str(datetime.datetime.now())[:10]: int(calibrados[ref].sum()),
str(datetime.datetime.now() + datetime.timedelta(days=7))[:10]: int(calibrados[ref].sum()+projetado[nbr+1:nbr+1+7].sum()),
str(datetime.datetime.now() + datetime.timedelta(days=14))[:10]: int(calibrados[ref].sum()+projetado[nbr+1:nbr+1+14].sum()),
str(datetime.datetime.now() + datetime.timedelta(days=21))[:10]: int(calibrados[ref].sum()+projetado[nbr+1:nbr+1+21].sum()),
}
# consolidado para output
infos = {
"mortes_no_pico": mortes_no_pico,
"dia_do_pico": dia_do_pico,
"pico": pico,
"index": ix_do_pico,
"mt": mortes_totais,
}
return correlacionados, calibrados, projetado, infos
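# End-to-end sketch (parameter values here are illustrative; the actual macro
# parameters are defined elsewhere in the project):
# raw, inicio, data, nbr, subs, refs, oficial = preparar_dados(p1=15, uf="SP", cidade=u"São Paulo")
# correlacionados, calibrados, projetado, infos = rodar_modelo(
#     raw, inicio, data, nbr, p2=3, p3=7, ref="Brasil", refs=refs)
# `projetado` extends the reference curve by following the p2 most correlated countries.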
def gerar_fig_relatorio(p1, p2, p3, uf, cidade):
"""Roda vários cenários e monta mosaico de gráficos + notas."""
notas = u"""
Sobre o modelo e as estimativas:
As projeções são obtidas a partir da trajetória observada nos três países que melhor se correlacionem com a evolução dos dados do Brasil e localidades.
O desenho da curva projetada (pontilhada) é reflexo do comportamento observado nos países seguidos. Conforme a epidemia avança a referência pode mudar.
Outros parâmetros relevantes:
• os valores são corrigidos por uma estimativa de subnotificação (s) calculado para duas situações:
(a) mortes suspeitas aguardando confirmação e ainda não notificadas
(b) mortes potencialmente devido à Covid-19 notificadas como devidas a outras causas
• as curvas dos diferentes lugares são emparelhadas a partir do dia em que ocorrem N ou mais mortes (eixo x).
• as curvas são alisadas (médias móveis), por isso não iniciam no dia zero. O alisamento permite melhor visualização das curvas mas pode gerar algum
desvio com relação aos número diários absolutos.
• as projeções são recalculadas diariamente e podem sofrer alterações significativas em função das novas informações incorporadas.
Fontes dos dados:
https://covid.ourworldindata.org
https://brasil.io
https://transparencia.registrocivil.org.br
"""
equipe = u' M.Zac | L.Tozi | R.Luciano || https://github.com/Maurozac/covid-br/blob/master/compara.py'
totais = u"""
Mortes estimadas (acumulado)"""
hoje = str(datetime.datetime.now())[:16]
fig, ax = plt.subplots(1, 3, figsize=(12, 6), sharex=True, sharey=True)
fig.suptitle(u"Projeção da epidemia Covid-19" + " | " + hoje, fontsize=12)
fig.subplots_adjust(bottom=0.5)
fig.text(0.33, 0.42, notas, fontsize=7, verticalalignment='top')
fig.text(0.33, 0.02, equipe, family="monospace", fontsize='6', color='#ff003f', horizontalalignment='left')
raw, inicio, data, nbr, subs, refs, oficial = preparar_dados(p1, uf, cidade)
for i in [0, 1, 2]:
if refs[i] == 'n/d':
ax[i].set_title(u"Dados não disponíveis", fontsize=8)
break
correlacionados, calibrados, projetado, infos = rodar_modelo(raw, inicio, data, nbr, p2, p3, refs[i], refs)
ax[i].set_title(refs[i], fontsize=8)
ax[i].set_xlabel(u'Dias desde ' + str(p1) + ' mortes em um dia', fontsize=8)
ax[i].set_xlim(0, calibrados.shape[0]+25)
ax[i].set_ylabel(u'Mortes por dia', fontsize=8)
for c in correlacionados:
ax[i].plot(calibrados[c], linewidth=3, color="#ff7c7a")
lvi = calibrados[c].last_valid_index()
ax[i].text(lvi+1, calibrados[c][lvi], c, fontsize=6, verticalalignment="center")
ax[i].plot(calibrados[refs[i]], linewidth=3, color="#1f78b4")
ax[i].plot(projetado, linewidth=2, linestyle=":", color="#1f78b4")
lvi = | pd.Series(projetado) | pandas.Series |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
import unittest
import numpy as np
import pandas as pd
from pandas import MultiIndex
from hadar.workflow.pipeline import (
Stage,
FreePlug,
RestrictedPlug,
FocusStage,
Clip,
Rename,
Drop,
Fault,
RepeatScenario,
Pipeline,
)
class Double(Stage):
def __init__(self):
Stage.__init__(self, FreePlug())
def _process_timeline(self, timelines: pd.DataFrame) -> pd.DataFrame:
return timelines * 2
class Max9(Stage):
def __init__(self):
Stage.__init__(self, FreePlug())
def _process_timeline(self, timelines: pd.DataFrame) -> pd.DataFrame:
return timelines.clip(None, 9)
class Divide(FocusStage):
def __init__(self):
Stage.__init__(self, RestrictedPlug(inputs=["a", "b"], outputs=["d", "r"]))
def _process_scenarios(self, n_scn: int, scenario: pd.DataFrame) -> pd.DataFrame:
scenario.loc[:, "d"] = (scenario["a"] / scenario["b"]).apply(np.floor)
scenario.loc[:, "r"] = scenario["a"] - scenario["b"] * scenario["d"]
return scenario.drop(["a", "b"], axis=1)
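# e.g. a=10, b=4 gives d=floor(10/4)=2 and r=10-4*2=2, i.e. integer division with
# remainder, computed independently for each scenario.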
class Inverse(FocusStage):
def __init__(self):
FocusStage.__init__(self, RestrictedPlug(inputs=["d"], outputs=["d", "-d"]))
def _process_scenarios(self, n_scn: int, scenario: pd.DataFrame) -> pd.DataFrame:
scenario.loc[:, "-d"] = -scenario["d"]
return scenario.copy()
class Wrong(Stage):
def __init__(self):
Stage.__init__(self, plug=RestrictedPlug(inputs=["e"], outputs=["e"]))
def _process_timeline(self, timeline: pd.DataFrame) -> pd.DataFrame:
return timeline
class TestFreePlug(unittest.TestCase):
def test_linkable_to(self):
self.assertTrue(FreePlug().linkable_to(FreePlug()))
def test_join_to_fre(self):
# Input
a = FreePlug()
b = FreePlug()
# Test
c = a + b
self.assertEqual(a, c)
def test_join_to_restricted(self):
# Input
a = FreePlug()
b = RestrictedPlug(inputs=["a", "b"], outputs=["c", "d"])
# Test
c = a + b
self.assertEqual(b, c)
class TestRestrictedPlug(unittest.TestCase):
def test_linkable_to_free(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b"])
# Test
self.assertTrue(a.linkable_to(FreePlug()))
def test_linkable_to_restricted_ok(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b", "c", "d"])
b = RestrictedPlug(inputs=["b", "c"], outputs=["e"])
# Test
self.assertTrue(a.linkable_to(b))
def test_linkable_to_restricted_wrong(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b", "c", "d"])
b = RestrictedPlug(inputs=["b", "c", "f"], outputs=["e"])
# Test
self.assertFalse(a.linkable_to(b))
def test_join_to_free(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b"])
# Test
b = a + FreePlug()
self.assertEqual(a, b)
def test_join_to_restricted(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b", "c", "d"])
b = RestrictedPlug(inputs=["b", "c"], outputs=["e"])
# Expected
exp = RestrictedPlug(inputs=["a"], outputs=["e", "d"])
# Test
c = a + b
self.assertEqual(exp, c)
class TestPipeline(unittest.TestCase):
def test_compute(self):
# Input
i = pd.DataFrame({"a": [1, 2, 3]})
pipe = Pipeline(stages=[Double(), Double()])
# Expected
exp = pd.DataFrame({(0, "a"): [4, 8, 12]})
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
def test_add(self):
# Input
i = pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})
pipe = Pipeline(stages=[Double(), Double()])
pipe += Divide()
# Expected
exp = pd.DataFrame({(0, "d"): [1, 1, 1], (0, "r"): [0, 0, 0]}, dtype=float)
# Test & Verify
o = pipe(i)
self.assertEqual(3, len(pipe.stages))
self.assertIsInstance(pipe.plug, RestrictedPlug)
pd.testing.assert_frame_equal(exp, o)
def test_link_pipeline_free_to_free(self):
# Input
i = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
pipe = Double() + Max9()
# Expected
exp = pd.DataFrame({(0, "a"): [2, 4, 6], (0, "b"): [8, 9, 9]})
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual([], pipe.plug.inputs)
self.assertEqual([], pipe.plug.outputs)
def test_link_pipeline_free_to_restricted(self):
# Input
i = pd.DataFrame({"a": [10, 20, 32], "b": [4, 5, 6]})
pipe = Double() + Divide()
# Expected
exp = pd.DataFrame({(0, "d"): [2, 4, 5], (0, "r"): [4, 0, 4]}, dtype="float")
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual(["a", "b"], pipe.plug.inputs)
self.assertEqual(["d", "r"], pipe.plug.outputs)
def test_link_pipeline_restricted_to_free(self):
# Input
i = pd.DataFrame({"a": [10, 20, 32], "b": [4, 5, 6]})
pipe = Divide() + Double()
# Expected
exp = pd.DataFrame({(0, "d"): [4, 8, 10], (0, "r"): [4, 0, 4]}, dtype="float")
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual(["a", "b"], pipe.plug.inputs)
self.assertEqual(["d", "r"], pipe.plug.outputs)
def test_link_pipeline_restricted_to_restricted(self):
# Input
i = pd.DataFrame({"a": [10, 20, 32], "b": [4, 5, 6]})
pipe = Divide() + Inverse()
# Expected
exp = pd.DataFrame(
{(0, "d"): [2, 4, 5], (0, "-d"): [-2, -4, -5], (0, "r"): [2, 0, 2]},
dtype="float",
)
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual({"a", "b"}, set(pipe.plug.inputs))
self.assertEqual({"d", "-d", "r"}, set(pipe.plug.outputs))
def test_wrong_link(self):
# Test & Verify
self.assertRaises(ValueError, lambda: Divide() + Wrong())
class TestStage(unittest.TestCase):
def test_compute(self):
# Input
i = pd.DataFrame(
{
(0, "a"): [1, 2, 3],
(0, "b"): [4, 5, 6],
(1, "a"): [10, 20, 30],
(1, "b"): [40, 50, 60],
}
)
stage = Double()
# Expected
exp = pd.DataFrame(
{
(0, "a"): [2, 4, 6],
(0, "b"): [8, 10, 12],
(1, "a"): [20, 40, 60],
(1, "b"): [80, 100, 120],
}
)
# Test & Verify
o = stage(i)
pd.testing.assert_frame_equal(exp, o)
def test_wrong_compute(self):
i = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
pipe = Inverse()
self.assertRaises(ValueError, lambda: pipe(i))
def test_standardize_column(self):
i = pd.DataFrame({"a": [1, 2, 3]})
# Expected
exp = pd.DataFrame({(0, "a"): [1, 2, 3]})
res = Stage.standardize_column(i)
pd.testing.assert_frame_equal(exp, res)
def test_build_multi_index(self):
# Input
scenarios = [1, 2, 3]
names = ["a", "b"]
# Expected
exp = MultiIndex.from_tuples(
[(1, "a"), (1, "b"), (2, "a"), (2, "b"), (3, "a"), (3, "b")]
)
# Test & Verify
index = Stage.build_multi_index(scenarios=scenarios, names=names)
pd.testing.assert_index_equal(exp, index)
class TestFocusPipeline(unittest.TestCase):
def test_compute(self):
# Input
i = pd.DataFrame(
{
(0, "b"): [1, 2, 3],
(0, "a"): [4, 5, 6],
(1, "b"): [10, 20, 30],
(1, "a"): [40, 50, 60],
}
)
pipe = Divide()
# Expected
exp = pd.DataFrame(
{
(0, "d"): [4, 2, 2],
(0, "r"): [0, 1, 0],
(1, "d"): [4, 2, 2],
(1, "r"): [0, 10, 0],
},
dtype="float",
)
# Test & Verify
o = pipe(i)
| pd.testing.assert_frame_equal(exp, o) | pandas.testing.assert_frame_equal |
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import joblib
import calendar
from datetime import datetime, timedelta
from collections import OrderedDict
from constants import *
plt.style.use('seaborn-whitegrid')
if not os.path.exists('tmp'):
os.mkdir('tmp')
class Covid:
database = {'confirmed' : '',
'suspicious' : '',
'negatives' : '',
'deaths' : '',
'patients' : ''}
def __init__(self,state):
if state.lower() in ['national','nacional','all','country']:
self.state = 'Nacional'
elif state.lower() in ['cdmx', 'distritofederal', 'df', 'ciudad de mexico']:
self.state = 'DISTRITO FEDERAL'
elif state.upper() not in cdns_states:
print_state_names(state)
return
else:
self.state = state.upper()
self.state_code = inverse_dict_for_name_states[self.state]
def __str__(self):
return f'Data for {self.state},\nstate code: {self.state_code},\npopulation: {self.population()}'
def discrete(self,data_type):
data = pd.read_csv(Covid.database[data_type], encoding='ANSI')
return data.loc[data['nombre'] == self.state].values[0][3:]
def cummulative(self,data_type):
data = pd.read_csv(Covid.database[data_type], encoding='ANSI')
cummulative = []
raw = data.loc[data['nombre'] == self.state]
for i in raw.values[0][3:]:
if len(cummulative) == 0:
cummulative.append(i)
else:
cummulative.append(i+cummulative[-1])
return np.array(cummulative)
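# Usage sketch (illustrative): Covid('Nacional').cummulative('deaths') turns a
# daily series such as [1, 2, 3] into the running total [1, 3, 6]; 'deaths' must
# be one of the keys configured in Covid.database.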
def actives(self,window=14):
try:
return joblib.load(f'tmp/{self.state}_actives_w_{window}_{Covid.database["patients"][3:6]}.pkl')
except:
pass
if self.state == 'Nacional':
data = pd.read_csv(Covid.database['patients'], encoding='ANSI')
data = change_df_names(data)
else:
data = pd.read_csv(Covid.database['patients'], encoding='ANSI')
data = change_df_names(data)
data = data[data['lives_at'] == self.state_code]
data = data[data['result']==1]
data['onset_symptoms'] = pd.to_datetime(data['onset_symptoms'])
set_dates = set(data['onset_symptoms'])
timeline= pd.date_range(start=min(set_dates), end = data['Updated_at'].iloc[0])
result = {key:0 for key in timeline}
for ind, day_active in enumerate(data['onset_symptoms']):
for _ in range(window):
if day_active not in timeline:
break
elif data['day_of_death'].iloc[ind] != '9999-99-99' and day_active > pd.to_datetime(data['day_of_death'].iloc[ind]):
break
else:
result[day_active] +=1
day_active = day_active + timedelta(days=1)
new_data = | pd.DataFrame() | pandas.DataFrame |
"""
Main script for the paper
A Comparison of Patient History- and EKG-based Cardiac Risk Scores
<NAME>, <NAME>, <NAME>
Proceedings of the AMIA Summit on Clinical Research Informatics (CRI), 2018
Runs various models, saves prediction outcomes.
"""
import feather, os, sys, pickle
from torch.autograd import Variable
import torch
import numpy as np
import pandas as pd
from collections import OrderedDict
# import my code
from ekgmodels import misc, base, mlp, resnet
import experiment_data as ed
import simple_baseline as sb
#############################################################################
# Runs all models for all outcomes, saves model output #
# to directory called #
#     prediction-output/<outcome>
#
# where <outcome> is one of
# - "future_afib"
# - "stroke_6m"
# - "troponin"
# - "mace"
#############################################################################
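# Illustrative layout after run_models() (file names follow the save calls below):
#   prediction-output/mace/lreg-simple.pkl, lreg-remark.pkl, ...
#   prediction-output/mace/beatnet-raw-ekg.pkl, beatnet-simple.pkl
#   prediction-output/mace/resnet-raw-ekg.pkl, resnet-simple.pkl
# and likewise for future_afib, stroke_6m and troponin.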
run_ehr_baselines = True
run_beatnet = True
run_resnet = True
features = ["simple",
"remark",
#"vae",
#"remark-vae",
#"simple-vae",
"simple-remark"]
#"simple-remark-vae"]
def run_models():
""" run all models for comparison --- takes a while """
# feature sets and combinations to compare
# Non Deep Methods
if run_ehr_baselines:
run_outcome(outcome = "future_afib", features=features)
run_outcome(outcome = "stroke_6m", features=features)
run_outcome(outcome = "troponin", features=features)
run_outcome(outcome = "mace", features=features)
# MLP outcomes
if run_beatnet:
run_mlp_outcome(outcome='troponin')
run_mlp_outcome(outcome='future_afib')
run_mlp_outcome(outcome='mace')
run_mlp_outcome(outcome='stroke_6m')
# full trace Resnet
if run_resnet:
run_resnet_outcome(outcome='future_afib')
run_resnet_outcome(outcome='mace')
run_resnet_outcome(outcome='stroke_6m')
run_resnet_outcome(outcome='troponin')
def make_figures():
""" make all figures from saved files (in "./prediction-output") """
# first figure --- full ekg example
plot_full_ekg_example()
# stitch together results table
outcomes = ['future_afib', "stroke_6m", "mace"] + \
['trop_3d', 'trop_7d' ,'trop_30d', 'trop_180d']
aucdf_no_hist = results_table(outcomes=outcomes, features=features,
do_logreg=True, do_net=True, subset="no_history")
aucdf = results_table(outcomes=outcomes, features=features,
do_logreg=True, do_net=True, subset=None)
# compute "improvement above simple, improvement above remark, etc"
# look at high MACE risk scores from beatnet
plot_age_based_risk(outcome="mace")
plot_age_based_risk(outcome="stroke_6m")
plot_age_based_risk(outcome="trop_30d")
plot_age_based_risk(outcome="trop_180d")
plot_age_based_risk(outcome="future_afib")
for cat in ['gender', 'race']:
plot_aucs_by_category(outcome="mace", category=cat)
plot_aucs_by_category(outcome="stroke_6m", category=cat)
plot_aucs_by_category(outcome="trop_180d", category=cat)
plot_aucs_by_category(outcome="trop_30d", category=cat)
plot_aucs_by_category(outcome="future_afib", category=cat)
plot_predictive_aucs(outcome="mace")
plot_predictive_aucs(outcome="trop_30d")
plot_predictive_aucs(outcome="trop_180d")
plot_predictive_aucs(outcome="future_afib")
plot_predictive_aucs(outcome="stroke_6m")
# construct and save cohort table
tabledf = ed.make_cohort_table()
print(tabledf)
tabledf.to_latex(os.path.join("prediction-output", "data-table.tex"), escape=False)
def run_outcome(outcome, features):
print("\n\n========================================================")
print(" predicting outcome %s for features %s "%(outcome, str(features)))
# predictor results
outcome_dir = os.path.join("prediction-output/%s"%outcome)
if not os.path.exists(outcome_dir):
os.makedirs(outcome_dir)
# logistic regression --- do all features in parallel
from joblib import Parallel, delayed
res_list = Parallel(n_jobs=len(features), verbose=5)(
delayed(sb.run_logistic)(outcome=outcome, features=f)
for f in features)
for reslr, feats in zip(res_list, features):
print(" saving logreg with features %s "%feats)
with open(os.path.join(outcome_dir, "lreg-%s.pkl"%feats), 'wb') as f:
pickle.dump(reslr, f)
def run_mlp_outcome(outcome):
# predictor results
outcome_dir = os.path.join("prediction-output/%s"%outcome)
if not os.path.exists(outcome_dir):
os.makedirs(outcome_dir)
# beat models
beatmod = sb.run_beat_mlp(outcome=outcome, use_features=False)
beatmod.save(os.path.join(outcome_dir, "beatnet-raw-ekg.pkl"))
# beat with simple
feats = "simple"
mod = sb.run_beat_mlp(outcome=outcome, use_features=True, features=feats)
mod.save(os.path.join(outcome_dir, "beatnet-%s.pkl"%feats))
def run_resnet_outcome(outcome):
# predictor results
outcome_dir = os.path.join("prediction-output/%s"%outcome)
if not os.path.exists(outcome_dir):
os.makedirs(outcome_dir)
# run EKG Resnet and EKG Beatnet
mod = sb.run_ekg_mlp(outcome=outcome, use_features=False)
mod.save(os.path.join(outcome_dir, "resnet-raw-ekg.pkl"))
feats = "simple"
smod = sb.run_ekg_mlp(outcome=outcome, use_features=False)
smod.save(os.path.join(outcome_dir, "resnet-%s.pkl"%feats))
def best_logistic_model(resdict):
vdf = resdict['valdf']
vauc = vdf[ vdf['metric'] == 'auc' ]
best_idx = np.argmax(vauc['value'].values)
best_mod = vauc['model'].iloc[best_idx]
return best_mod
def pairwise_compare_aucs(outcome="mace", features=["simple", "remark"]):
if "trop_" in outcome:
outcome_dir = os.path.join("prediction-output/troponin")
Xdf, Ydf, encdf = ed.make_dataset(
outcome='troponin', features='simple', do_split=False)
else:
outcome_dir = os.path.join("prediction-output/%s"%outcome)
Xdf, Ydf, encdf = ed.make_dataset(
outcome=outcome, features='simple', do_split=False)
_, _ , mdata = misc.split_data(Xdf.values, Ydf.values, encdf.split, encdf)
split = "test"
subset = "no_history"
zs_mod, ys_mod = {}, {}
for feats in features:
print(" lreg w/ feats: ", feats)
with open(os.path.join(outcome_dir, "lreg-%s.pkl"%feats), 'rb') as f:
res = pickle.load(f)[outcome]
zs = res['z%s'%split]
Ys = Ydf.loc[zs.index][outcome]
Xs = Xdf.loc[zs.index]
encs = encdf.loc[zs.index]
if subset == "no_history":
has_past_afib = encdf.loc[zs.index]['has_afib_past']
no_idx = (Xs['mi']==0.) & (Xs['diabetes']==0.) & \
(Xs['stroke']==0.) & (Xs['hypertense']==0.) & \
(has_past_afib == 0.) & \
(encs['age'] < 50.)
if outcome == "mace":
untested_idx = ~pd.isnull(encs['has_mace'])
no_idx = no_idx & untested_idx
zs = zs[no_idx]
Ys = Ys[no_idx]
zs_mod[feats] = zs
ys_mod[feats] = Ys
modfiles = ['beatnet-raw-ekg.pkl', 'beatnet-simple.pkl',
'resnet-raw-ekg.pkl', 'resnet-simple.pkl']
modfiles = ['beatnet-raw-ekg.pkl', 'resnet-raw-ekg.pkl', 'beatnet-simple.pkl']
for modfile in modfiles:
# load ekg mlp outcome
print(" ... loading mod file %s"%modfile)
mod = base.load_model(os.path.join(outcome_dir, modfile))
print(" ... has %d params"%mod.num_params())
mdf = mod.fit_res['%sdf-%s'%(split, outcome)]
#mauc = mdf[ mdf['metric']=='auc' ]['string'].iloc[0]
zs = mod.fit_res['z%s-enc-%s'%(split, outcome)]
if not hasattr(zs, 'index'):
split_idx = ['train', 'val', 'test'].index(split)
zs = pd.Series(zs, index=mdata[split_idx].index)
Ys = Ydf.loc[zs.index][outcome]
Xs = Xdf.loc[zs.index]
encs = encdf.loc[zs.index]
if subset == "no_history":
has_past_afib = encdf.loc[zs.index]['has_afib_past']
no_idx = (Xs['mi']==0.) & (Xs['diabetes']==0.) & \
(Xs['stroke']==0.) & (Xs['hypertense']==0.) & \
(has_past_afib == 0.) & \
(encs['age'] < 50.)
if outcome == "mace":
untested_idx = ~pd.isnull(encs['has_mace'])
no_idx = no_idx & untested_idx
zs = zs[no_idx]
Ys = Ys[no_idx]
zs_mod[modfile] = zs
ys_mod[modfile] = Ys
# compare pairs
zsekg = zs_mod['beatnet-raw-ekg.pkl']
zsresnet = zs_mod['resnet-raw-ekg.pkl'].loc[zsekg.index]
zsbase = zs_mod['simple'].loc[zsekg.index]
# note: assumes 'simple' and 'remark' are among the requested features (the defaults)
zsrem = zs_mod['remark'].loc[zsekg.index]
ysbase = Ys.loc[zsekg.index]
auc_a, auc_b, diff = misc.bootstrap_auc_comparison(
ysbase.values, zsbase.values, zsekg.values, num_samples=1000)
print(" simple => beatnet ", np.percentile(diff, [2.5, 97.5]))
auc_a, auc_b, diff = misc.bootstrap_auc_comparison(
ysbase.values, zsrem.values, zsekg.values, num_samples=1000)
print(" rem => beatnet ", np.percentile(diff, [2.5, 97.5]))
auc_a, auc_b, diff = misc.bootstrap_auc_comparison(
ysbase.values, zsresnet.values, zsekg.values, num_samples=1000)
print(" resnet => beatnet ", np.percentile(diff, [2.5, 97.5]))
auc_a, auc_b, diff = misc.bootstrap_auc_comparison(
ysbase.values, zsrem.values, zsresnet.values, num_samples=1000)
def results_table(outcomes=["future_afib"],
features=["simple", "remark"],
split="test",
subset = None,
do_logreg=True,
do_net=False):
# no history subset
auc_cols = OrderedDict()
for outcome in outcomes:
print("\n===== outcome %s ========"%outcome)
if "trop_" in outcome:
outcome_dir = os.path.join("prediction-output/troponin")
Xdf, Ydf, encdf = ed.make_dataset(
outcome='troponin', features='simple', do_split=False)
else:
outcome_dir = os.path.join("prediction-output/%s"%outcome)
Xdf, Ydf, encdf = ed.make_dataset(
outcome=outcome, features='simple', do_split=False)
_, _ , mdata = misc.split_data(Xdf.values, Ydf.values, encdf.split, encdf)
rows = []
for feats in features:
# lreg results
if do_logreg:
print(" lreg w/ feats: ", feats)
with open(os.path.join(outcome_dir, "lreg-%s.pkl"%feats), 'rb') as f:
res = pickle.load(f)[outcome]
#best_mod = best_logistic_model(res)
#tdf = res['%sdf'%split]
#auc = tdf[ (tdf['model']==best_mod) & (tdf['metric']=='auc') ]['string'].iloc[0]
zs = res['z%s'%split]
Ys = Ydf.loc[zs.index][outcome]
Xs = Xdf.loc[zs.index]
encs = encdf.loc[zs.index]
if subset == "no_history":
has_past_afib = encdf.loc[zs.index]['has_afib_past']
no_idx = (Xs['mi']==0.) & (Xs['diabetes']==0.) & \
(Xs['stroke']==0.) & (Xs['hypertense']==0.) & \
(has_past_afib == 0.) & \
(encs['age'] < 50.)
if outcome == "mace":
untested_idx = ~pd.isnull(encs['has_mace'])
no_idx = no_idx & untested_idx
zs = zs[no_idx]
Ys = Ys[no_idx]
baucs = misc.bootstrap_auc(Ys.values, zs.values, num_samples=1000)
auc = "%2.3f [%2.3f, %2.3f]"%(
baucs.mean(), np.percentile(baucs, 2.5), np.percentile(baucs, 97.5))
rows.append(auc)
#print('features: ', feats)
#print(res['coefdf'][best_mod].sort_values())
#xg boost results
else:
with open(os.path.join(outcome_dir, "xgb-%s.pkl"%feats), 'rb') as f:
res = pickle.load(f)
tdf = res['%sdf'%split]
auc = tdf[tdf['metric']=='auc']['string'].iloc[0]
rows.append(auc)
if do_net:
modfiles = ['beatnet-raw-ekg.pkl', 'beatnet-simple.pkl',
'resnet-raw-ekg.pkl', 'resnet-simple.pkl']
for modfile in modfiles:
# load ekg mlp outcome
print(" ... loading mod file %s"%modfile)
mod = base.load_model(os.path.join(outcome_dir, modfile))
mdf = mod.fit_res['%sdf-%s'%(split, outcome)]
mauc = mdf[ mdf['metric']=='auc' ]['string'].iloc[0]
zs = mod.fit_res['z%s-enc-%s'%(split, outcome)]
if not hasattr(zs, 'index'):
split_idx = ['train', 'val', 'test'].index(split)
zs = pd.Series(zs, index=mdata[split_idx].index)
Ys = Ydf.loc[zs.index][outcome]
Xs = Xdf.loc[zs.index]
encs = encdf.loc[zs.index]
if subset == "no_history":
has_past_afib = encdf.loc[zs.index]['has_afib_past']
no_idx = (Xs['mi']==0.) & (Xs['diabetes']==0.) & \
(Xs['stroke']==0.) & (Xs['hypertense']==0.) & \
(has_past_afib == 0.) & \
(encs['age'] < 50.)
if outcome == "mace":
untested_idx = ~pd.isnull(encs['has_mace'])
no_idx = no_idx & untested_idx
zs = zs[no_idx]
Ys = Ys[no_idx]
print(Ys, zs)
baucs = misc.bootstrap_auc(Ys.values, zs.values, num_samples=1000)
mauc = "%2.3f [%2.3f, %2.3f]"%(
baucs.mean(), np.percentile(baucs, 2.5), np.percentile(baucs, 97.5))
rows.append(mauc)
auc_cols[outcome] = rows
import copy
fidx = copy.deepcopy(features)
if do_net:
fidx += ['beatnet', 'beatnet+simple',
'resnet', 'resnet+simple']
aucdf = pd.DataFrame(auc_cols, index=fidx)
return aucdf
from ekgmodels import viz
import matplotlib.pyplot as plt; plt.ion()
#########################
# EKG Plotting Code #
#########################
def plot_full_ekg_example():
# load outcome data and encounter dataframe
Xdf, Ydf, encdf = ed.make_dataset(outcome='mace', features='remarks', do_split=False)
Xmat, _, tgrid = ed.load_ekg_data(encdf=encdf, constrain_range=False)
fig, ax = plt.figure(figsize=(12,3)), plt.gca()
ax = viz.plot_stacked_ecg(ax, Xmat[300], linewidth=1.5) # **kwargs)
ax.set_xlabel("time (seconds)", fontsize=16)
ax.get_yaxis().set_ticks([])
fig.tight_layout()
fig.savefig('prediction-output/example_ekg.png', bbox_inches='tight', dpi=150)
plt.close("all")
# plot segmented beats
from ekgmodels import preproc
beatdf, beatmat = preproc.create_beat_dataset_fixed(
encdf, Xmat, tgrid, detrend=False)
def savebeat(bis, name="example_beats.png"):
fig, axarr = plt.subplots(1, 3, figsize=(6, 3.5))
for bi, ax in zip(bis, axarr.flatten()):
blen = beatdf['beat_len'].iloc[bi]
ax = viz.plot_stacked_beat(ax, beatmat[bi], beatlen=blen)
axarr[0].set_xlabel("")
axarr[2].set_xlabel("")
axarr[1].set_ylabel("")
axarr[2].set_ylabel("")
fig.savefig("prediction-output/%s"%name, bbox_inches='tight', dpi=150)
plt.close("all")
bis = [0, 1, 2]
savebeat(bis, name="example_beats.png")
bis = [300, 301, 302]
savebeat(bis, name="example_beats-2.png")
def load_prediction_scores(outcome, feats, split="test"):
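"""Load saved prediction scores for `outcome`/`feats` (a feature-set name or a
saved model file) and align them with the outcome labels, features and
encounter metadata of the requested split."""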
# load data for reference
if "trop_" in outcome:
outcome_dir = os.path.join("prediction-output/troponin")
Xdf, Ydf, encdf = ed.make_dataset(
outcome='troponin', features='simple', do_split=False)
else:
outcome_dir = os.path.join("prediction-output/%s"%outcome)
Xdf, Ydf, encdf = ed.make_dataset(
outcome=outcome, features='simple', do_split=False)
_, _ , mdata = misc.split_data(Xdf.values, Ydf.values, encdf.split, encdf)
# load predictions
if '.pkl' in feats:
print(" ... loading mod file %s"%feats)
mod = base.load_model(os.path.join(outcome_dir, feats))
zs = mod.fit_res['z%s-enc-%s'%(split, outcome)]
if not hasattr(zs, 'index'):
split_idx = ['train', 'val', 'test'].index(split)
zs = pd.Series(zs, index=mdata[split_idx].index)
else:
with open(os.path.join(outcome_dir, "lreg-%s.pkl"%feats), 'rb') as f:
res = pickle.load(f)[outcome]
zs = res['z%s'%split]
# subselect to test split
Ys = Ydf.loc[zs.index][outcome]
Xs = Xdf.loc[zs.index]
encs = encdf.loc[zs.index]
return zs, Ys, Xs, encs
def get_age_bin_aucs(outcome="mace", feats="simple"):
# make sure we subsample to untested for mace
zs, Ys, Xs, encs = load_prediction_scores(
outcome=outcome, feats=feats, split='test')
if outcome == "mace":
untested_idx = ~ | pd.isnull(encs['has_mace']) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
This code allows us to run the configuration slices analysis for the TGM model
"""
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import random
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams.update({'font.size': 18})
pathResults = './results/ResultTGM/'
path_to_graph = './results/resultSliceClustering/'
list_datasets= ['DBLP1', 'DBLP2' , 'PubMed_Diabets', 'classic3','classic4' , 'ag_news']
NameDatasets = ['DBLP1', 'DBLP2' , 'PubMed-Diabets', 'Classic3', 'Classic4', 'AG-news']
config = [['BOW', 'SentenceRo','Entity'], ['BOW', 'SentenceRo','Entity'],['BOW', 'Bert', 'SentenceRo', 'GLOVE', 'Entity'],
['BOW', 'Bert', 'GLOVE', 'Entity'],['BOW', 'Bert', 'Entity'], ['BOW', 'SentenceRo', 'Entity'],
['BOW', 'Bert'], ['BOW', 'SentenceRo'], ['Bert', 'Entity'],
['SentenceRo', 'Entity'],['BOW', 'SentenceRo', 'GLOVE', 'Entity'],['BOW', 'SentenceRo', 'Bert', 'Entity'],
['BOW', 'GLOVE', 'Entity'],['SentenceRo', 'GLOVE', 'Entity']]
tabVide = pd.DataFrame(columns=["Dataset", "Time", "ACC", "NMI", "ARI", "Purity",'Config'])
for c , configName in enumerate(config):
slices = config[c]
configName = ''
for s in range(len(slices)):
configName = configName + slices[s] + '_'
if len(slices)==5:
configName2= 'All'
else:
configName2 = configName
tableClustering = pd.read_csv(pathResults+'Results_TGM_'+ configName + '.csv')
tableClustering['Config'] = configName[:-1] .replace("_", "&")
tableClustering['SliceNb'] =len(slices)
if len(slices)< 4:
tableClustering['Slice'] = str(len(slices)) + 'Slices'
else:
tableClustering['Slice'] = '> 4 Slices'
print('tableClustering ', tableClustering.shape)
tabVide = | pd.concat([tabVide, tableClustering], axis=0) | pandas.concat |
import operator
import warnings
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, to_timedelta
import pandas._testing as tm
from pandas.core.algorithms import checked_add_with_arr
from .pandas_vb_common import numeric_dtypes
try:
import pandas.core.computation.expressions as expr
except ImportError:
import pandas.computation.expressions as expr
try:
import pandas.tseries.holiday
except ImportError:
pass
class IntFrameWithScalar:
params = [
[np.float64, np.int64],
[2, 3.0, np.int32(4), np.float64(5)],
[
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.pow,
operator.mod,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.lt,
operator.le,
],
]
param_names = ["dtype", "scalar", "op"]
def setup(self, dtype, scalar, op):
arr = np.random.randn(20000, 100)
self.df = DataFrame(arr.astype(dtype))
def time_frame_op_with_scalar(self, dtype, scalar, op):
op(self.df, scalar)
class OpWithFillValue:
def setup(self):
# GH#31300
arr = np.arange(10 ** 6)
df = DataFrame({"A": arr})
ser = df["A"]
self.df = df
self.ser = ser
def time_frame_op_with_fill_value_no_nas(self):
self.df.add(self.df, fill_value=4)
def time_series_op_with_fill_value_no_nas(self):
self.ser.add(self.ser, fill_value=4)
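# Illustrative sketch (not part of the benchmark file): what fill_value does in the add()
# calls above -- when only one operand is missing at a position, the missing side is
# replaced by fill_value before adding, so fewer NaNs propagate. The toy series are hypothetical.
import numpy as np
import pandas as pd
_s = pd.Series([1.0, np.nan, 3.0])
_t = pd.Series([10.0, 20.0, np.nan])
_plain = _s + _t                    # NaN wherever either operand is missing
_filled = _s.add(_t, fill_value=4)  # 11.0, 24.0, 7.0 -- NaN only if both sides are missing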
class MixedFrameWithSeriesAxis:
params = [
[
"eq",
"ne",
"lt",
"le",
"ge",
"gt",
"add",
"sub",
"truediv",
"floordiv",
"mul",
"pow",
]
]
param_names = ["opname"]
def setup(self, opname):
arr = np.arange(10 ** 6).reshape(1000, -1)
df = DataFrame(arr)
df["C"] = 1.0
self.df = df
self.ser = df[0]
self.row = df.iloc[0]
def time_frame_op_with_series_axis0(self, opname):
getattr(self.df, opname)(self.ser, axis=0)
def time_frame_op_with_series_axis1(self, opname):
getattr(operator, opname)(self.df, self.ser)
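# Illustrative sketch (not part of the benchmark file): the two methods above differ in how
# the Series is aligned with the frame -- axis=0 aligns it with the row index, while the
# plain operator aligns it with the columns. The small frame below is hypothetical.
import numpy as np
import pandas as pd
_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["a", "b"])
_by_rows = _df.add(_df["a"], axis=0)   # broadcast down the rows (index-aligned)
_by_cols = _df + _df.iloc[0]           # broadcast across the columns (column-aligned)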
class Ops:
params = [[True, False], ["default", 1]]
param_names = ["use_numexpr", "threads"]
def setup(self, use_numexpr, threads):
self.df = DataFrame(np.random.randn(20000, 100))
self.df2 = DataFrame(np.random.randn(20000, 100))
if threads != "default":
expr.set_numexpr_threads(threads)
if not use_numexpr:
expr.set_use_numexpr(False)
def time_frame_add(self, use_numexpr, threads):
self.df + self.df2
def time_frame_mult(self, use_numexpr, threads):
self.df * self.df2
def time_frame_multi_and(self, use_numexpr, threads):
self.df[(self.df > 0) & (self.df2 > 0)]
def time_frame_comparison(self, use_numexpr, threads):
self.df > self.df2
def teardown(self, use_numexpr, threads):
expr.set_use_numexpr(True)
expr.set_numexpr_threads()
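# Illustrative sketch (not part of the benchmark file): outside asv, the same numexpr
# toggling that Ops parameterizes looks like this; calling set_numexpr_threads() with no
# argument restores the default thread count.
import numpy as np
import pandas as pd
import pandas.core.computation.expressions as expr
_a = pd.DataFrame(np.random.randn(1000, 100))
_b = pd.DataFrame(np.random.randn(1000, 100))
expr.set_use_numexpr(False)   # force the plain NumPy evaluation path
_ = _a + _b
expr.set_use_numexpr(True)    # restore numexpr (where it is installed)
expr.set_numexpr_threads()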
class Ops2:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N, N))
self.df2 = DataFrame(np.random.randn(N, N))
self.df_int = DataFrame(
np.random.randint(
np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)
)
)
self.df2_int = DataFrame(
np.random.randint(
np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)
)
)
self.s = Series(np.random.randn(N))
# Division
def time_frame_float_div(self):
self.df // self.df2
def time_frame_float_div_by_zero(self):
self.df / 0
def time_frame_float_floor_by_zero(self):
self.df // 0
def time_frame_int_div_by_zero(self):
self.df_int / 0
# Modulo
def time_frame_int_mod(self):
self.df_int % self.df2_int
def time_frame_float_mod(self):
self.df % self.df2
# Dot product
def time_frame_dot(self):
self.df.dot(self.df2)
def time_series_dot(self):
self.s.dot(self.s)
def time_frame_series_dot(self):
self.df.dot(self.s)
class Timeseries:
params = [None, "US/Eastern"]
param_names = ["tz"]
def setup(self, tz):
N = 10 ** 6
halfway = (N // 2) - 1
self.s = Series(date_range("20010101", periods=N, freq="T", tz=tz))
self.ts = self.s[halfway]
self.s2 = Series(date_range("20010101", periods=N, freq="s", tz=tz))
def time_series_timestamp_compare(self, tz):
self.s <= self.ts
def time_timestamp_series_compare(self, tz):
self.ts >= self.s
def time_timestamp_ops_diff(self, tz):
self.s2.diff()
def time_timestamp_ops_diff_with_shift(self, tz):
self.s - self.s.shift()
class IrregularOps:
def setup(self):
N = 10 ** 5
idx = date_range(start="1/1/2000", periods=N, freq="s")
s = Series(np.random.randn(N), index=idx)
self.left = s.sample(frac=1)
self.right = s.sample(frac=1)
def time_add(self):
self.left + self.right
class TimedeltaOps:
def setup(self):
self.td = to_timedelta(np.arange(1000000))
self.ts = Timestamp("2000")
def time_add_td_ts(self):
self.td + self.ts
class CategoricalComparisons:
params = ["__lt__", "__le__", "__eq__", "__ne__", "__ge__", "__gt__"]
param_names = ["op"]
def setup(self, op):
N = 10 ** 5
self.cat = pd.Categorical(list("aabbcd") * N, ordered=True)
def time_categorical_op(self, op):
getattr(self.cat, op)("b")
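# Illustrative sketch (not part of the benchmark file): comparisons on an ordered
# Categorical, like the ones timed above, compare category order rather than raw values
# and return a boolean array.
import pandas as pd
_cat = pd.Categorical(list("aabbcd"), categories=list("abcd"), ordered=True)
_mask = _cat < "b"   # True only for the 'a' entries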
class IndexArithmetic:
params = ["float", "int"]
param_names = ["dtype"]
def setup(self, dtype):
N = 10 ** 6
indexes = {"int": "makeIntIndex", "float": "makeFloatIndex"}
self.index = getattr(tm, indexes[dtype])(N)
def time_add(self, dtype):
self.index + 2
def time_subtract(self, dtype):
self.index - 2
def time_multiply(self, dtype):
self.index * 2
def time_divide(self, dtype):
self.index / 2
def time_modulo(self, dtype):
self.index % 2
class NumericInferOps:
# from GH 7332
params = numeric_dtypes
param_names = ["dtype"]
def setup(self, dtype):
N = 5 * 10 ** 5
self.df = DataFrame(
{"A": np.arange(N).astype(dtype), "B": np.arange(N).astype(dtype)}
)
def time_add(self, dtype):
self.df["A"] + self.df["B"]
def time_subtract(self, dtype):
self.df["A"] - self.df["B"]
def time_multiply(self, dtype):
self.df["A"] * self.df["B"]
def time_divide(self, dtype):
self.df["A"] / self.df["B"]
def time_modulo(self, dtype):
self.df["A"] % self.df["B"]
class DateInferOps:
# from GH 7332
def setup_cache(self):
N = 5 * 10 ** 5
df = DataFrame({"datetime64": np.arange(N).astype("datetime64[ms]")})
df["timedelta"] = df["datetime64"] - df["datetime64"]
return df
def time_subtract_datetimes(self, df):
df["datetime64"] - df["datetime64"]
def time_timedelta_plus_datetime(self, df):
df["timedelta"] + df["datetime64"]
def time_add_timedeltas(self, df):
df["timedelta"] + df["timedelta"]
class AddOverflowScalar:
params = [1, -1, 0]
param_names = ["scalar"]
def setup(self, scalar):
N = 10 ** 6
self.arr = np.arange(N)
def time_add_overflow_scalar(self, scalar):
checked_add_with_arr(self.arr, scalar)
class AddOverflowArray:
def setup(self):
N = 10 ** 6
self.arr = np.arange(N)
self.arr_rev = np.arange(-N, 0)
        self.arr_mixed = np.array([1, -1]).repeat(N // 2)  # integer repeat count; a float is rejected by newer NumPy
self.arr_nan_1 = np.random.choice([True, False], size=N)
self.arr_nan_2 = np.random.choice([True, False], size=N)
def time_add_overflow_arr_rev(self):
checked_add_with_arr(self.arr, self.arr_rev)
def time_add_overflow_arr_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, arr_mask=self.arr_nan_1)
def time_add_overflow_b_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, b_mask=self.arr_nan_1)
def time_add_overflow_both_arg_nan(self):
checked_add_with_arr(
self.arr, self.arr_mixed, arr_mask=self.arr_nan_1, b_mask=self.arr_nan_2
)
hcal = pd.tseries.holiday.USFederalHolidayCalendar()
# These offsets currently raise a NotImplementedError with .apply_index()
non_apply = [
pd.offsets.Day(),
pd.offsets.BYearEnd(),
pd.offsets.BYearBegin(),
pd.offsets.BQuarterEnd(),
pd.offsets.BQuarterBegin(),
pd.offsets.BMonthEnd(),
pd.offsets.BMonthBegin(),
pd.offsets.CustomBusinessDay(),
pd.offsets.CustomBusinessDay(calendar=hcal),
pd.offsets.CustomBusinessMonthBegin(calendar=hcal),
pd.offsets.CustomBusinessMonthEnd(calendar=hcal),
pd.offsets.CustomBusinessMonthEnd(calendar=hcal),
]
other_offsets = [
pd.offsets.YearEnd(),
pd.offsets.YearBegin(),
pd.offsets.QuarterEnd(),
pd.offsets.QuarterBegin(),
pd.offsets.MonthEnd(),
| pd.offsets.MonthBegin() | pandas.offsets.MonthBegin |
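# Illustrative sketch (not part of the benchmark file): the offsets listed above are
# benchmarked against whole DatetimeIndex objects; adding an offset shifts every element
# at once (vectorized for most offsets, element-wise with a warning for the rest).
import pandas as pd
_idx = pd.date_range("2000-01-15", periods=3, freq="D")
_shifted = _idx + pd.offsets.MonthBegin()   # each timestamp rolls forward to the next month start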
# -*- coding: utf-8 -*-
"""Cross references from cbms2019.
.. seealso:: https://github.com/pantapps/cbms2019
"""
import pandas as pd
from pyobo.constants import (
PROVENANCE,
SOURCE_ID,
SOURCE_PREFIX,
TARGET_ID,
TARGET_PREFIX,
XREF_COLUMNS,
)
__all__ = [
"get_cbms2019_xrefs_df",
]
#: Columns: DOID, DO name, xref db, xref id
base_url = "https://raw.githubusercontent.com/pantapps/cbms2019/master"
doid_to_all = f"{base_url}/mesh_icd10cm_via_do_not_mapped_umls.tsv"
#: Columns: SNOMEDCT_ID, SNOMEDCIT_NAME, ICD10CM_ID, ICD10CM_NAME, MESH_ID
all_to_all = f"{base_url}/mesh_icd10cm_via_snomedct_not_mapped_umls.tsv"
#: Columns: DOID, DO name, xref db, xref id
doid_to_all_2 = f"{base_url}/mesh_snomedct_via_do_not_mapped_umls.tsv"
#: Columns: SNOMEDCT_ID, SNOMEDCIT_NAME, ICD10CM_ID, ICD10CM_NAME, MESH_ID
all_to_all_2 = f"{base_url}/mesh_snomedct_via_icd10cm_not_mapped_umls.tsv"
NSM = {
"MESH": "mesh",
"ICD10CM": "icd",
"SNOMEDCT_US_2016_03_01": "snomedct",
}
def _get_doid(url: str) -> pd.DataFrame:
df = pd.read_csv(url, sep="\t", usecols=["DO_ID", "resource", "resource_ID"])
df.columns = [SOURCE_ID, TARGET_PREFIX, TARGET_ID]
df[SOURCE_PREFIX] = "doid"
df[SOURCE_ID] = df[SOURCE_ID].map(lambda s: s[len("DOID:") :])
df[PROVENANCE] = url
df[TARGET_PREFIX] = df[TARGET_PREFIX].map(NSM.get)
df = df[XREF_COLUMNS]
return df
def _get_mesh_to_icd_via_doid() -> pd.DataFrame:
return _get_doid(doid_to_all)
def _get_mesh_to_icd_via_snomedct() -> pd.DataFrame:
df = pd.read_csv(all_to_all, sep="\t", usecols=["SNOMEDCT_ID", "ICD10CM_ID", "MESH_ID"])
rows = []
for snomedct_id, icd_id, mesh_id in df.values:
rows.append(("mesh", mesh_id, "snomedct", snomedct_id, all_to_all))
rows.append(("snomedct", snomedct_id, "icd", icd_id, all_to_all))
return | pd.DataFrame(rows, columns=XREF_COLUMNS) | pandas.DataFrame |
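# Illustrative sketch (not part of the original module): the xref frames above are built
# from (source prefix, source id, target prefix, target id, provenance) tuples; a minimal
# equivalent with hypothetical column names and values looks like this.
import pandas as pd
_columns = ["source_ns", "source_id", "target_ns", "target_id", "source"]
_rows = [("mesh", "D003924", "snomedct", "44054006", "example.tsv")]
_xrefs = pd.DataFrame(_rows, columns=_columns)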