from typing import Union, Dict, Tuple, List
import os
import tempfile
import zipfile as zf
import logging
import patsy
import pandas as pd
import numpy as np
import scipy.sparse
import xarray as xr
import dask
import dask.array
try:
import anndata
except ImportError:
anndata = None
logger = logging.getLogger(__name__)
def _sparse_to_xarray(data, dims):
num_observations, num_features = data.shape
def fetch_X(idx):
idx = np.asarray(idx).reshape(-1)
retval = data[idx].toarray()
if idx.size == 1:
retval = np.squeeze(retval, axis=0)
return retval.astype(np.float32)
delayed_fetch = dask.delayed(fetch_X, pure=True)
X = [
dask.array.from_delayed(
delayed_fetch(idx),
shape=(num_features,),
dtype=np.float32
) for idx in range(num_observations)
]
X = xr.DataArray(dask.array.stack(X), dims=dims)
# currently broken:
# X = data.X
# X = dask.array.from_array(X, X.shape)
#
# X = xr.DataArray(X, dims=dims)
return X
def xarray_from_data(
data: Union[anndata.AnnData, xr.DataArray, xr.Dataset, np.ndarray],
dims: Union[Tuple, List] = ("observations", "features")
) -> xr.DataArray:
"""
    Parse any array-like object, xr.DataArray, xr.Dataset or anndata.AnnData and return an xr.DataArray containing
    the observations.
    :param data: Array-like, xr.DataArray, xr.Dataset or anndata.AnnData object containing observations
:param dims: tuple or list with two strings. Specifies the names of the xarray dimensions.
:return: xr.DataArray of shape `dims`
"""
if anndata is not None and isinstance(data, anndata.AnnData):
if scipy.sparse.issparse(data.X):
X = _sparse_to_xarray(data.X, dims=dims)
X.coords[dims[0]] = np.asarray(data.obs_names)
X.coords[dims[1]] = np.asarray(data.var_names)
else:
X = data.X
X = xr.DataArray(X, dims=dims, coords={
dims[0]: np.asarray(data.obs_names),
dims[1]: np.asarray(data.var_names),
})
elif isinstance(data, xr.Dataset):
X: xr.DataArray = data["X"]
elif isinstance(data, xr.DataArray):
X = data
else:
if scipy.sparse.issparse(data):
X = _sparse_to_xarray(data, dims=dims)
else:
X = xr.DataArray(data, dims=dims)
return X
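
# A minimal usage sketch, assuming small made-up inputs; it exercises the two
# code paths of xarray_from_data (dense ndarray vs. scipy sparse matrix).
def _example_xarray_from_data():
    dense = np.arange(6, dtype=np.float32).reshape(2, 3)
    x_dense = xarray_from_data(dense)  # plain ndarray -> xr.DataArray
    x_sparse = xarray_from_data(scipy.sparse.csr_matrix(dense))  # dask-backed DataArray
    return x_dense, x_sparse
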
def design_matrix(
sample_description: pd.DataFrame,
formula: str,
as_categorical: Union[bool, list] = True,
return_type: str = "matrix",
) -> Union[patsy.design_info.DesignMatrix, xr.Dataset, pd.DataFrame]:
"""
Create a design matrix from some sample description
:param sample_description: pandas.DataFrame of length "num_observations" containing explanatory variables as columns
:param formula: model formula as string, describing the relations of the explanatory variables.
E.g. '~ 1 + batch + confounder'
:param as_categorical: boolean or list of booleans corresponding to the columns in 'sample_description'
If True, all values in 'sample_description' will be treated as categorical values.
If list of booleans, each column will be changed to categorical if the corresponding value in 'as_categorical'
is True.
    Set to False if columns should not be changed.
:param return_type: type of the returned value.
- "matrix": return plain patsy.design_info.DesignMatrix object
- "dataframe": return pd.DataFrame with observations as rows and params as columns
- "xarray": return xr.Dataset with design matrix as ds["design"] and the sample description embedded as
one variable per column
:return: a model design matrix
"""
sample_description: pd.DataFrame = sample_description.copy()
if type(as_categorical) is not bool or as_categorical:
if type(as_categorical) is bool and as_categorical:
as_categorical = np.repeat(True, sample_description.columns.size)
for to_cat, col in zip(as_categorical, sample_description):
if to_cat:
sample_description[col] = sample_description[col].astype("category")
dmat = patsy.highlevel.dmatrix(formula, sample_description)
if return_type == "dataframe":
df = pd.DataFrame(dmat, columns=dmat.design_info.column_names)
df = pd.concat([df, sample_description], axis=1)
df.set_index(list(sample_description.columns), inplace=True)
return df
elif return_type == "xarray":
ar = xr.DataArray(dmat, dims=("observations", "design_params"))
ar.coords["design_params"] = dmat.design_info.column_names
ds = xr.Dataset({
"design": ar,
})
for col in sample_description:
ds[col] = (("observations",), sample_description[col])
return ds
else:
return dmat
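
# A minimal usage sketch, assuming a tiny sample description; the column names
# 'batch' and 'condition' are made up for illustration.
def _example_design_matrix():
    sample_description = pd.DataFrame({
        "batch": ["b0", "b0", "b1", "b1"],
        "condition": ["ctrl", "treat", "ctrl", "treat"],
    })
    dmat = design_matrix(sample_description, formula="~ 1 + batch + condition")
    df = design_matrix(sample_description, formula="~ 1 + batch + condition",
                       return_type="dataframe")
    return dmat, df
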
def sample_description_from_xarray(
dataset: xr.Dataset,
dim: str,
):
"""
    Create a sample description pd.DataFrame from a given xarray.Dataset.
    :param dataset: xarray.Dataset containing explanatory variables.
    :param dim: name of the observation dimension; all variables with exactly this dimension
        are used as columns of the sample description.
    :return: pd.DataFrame
"""
explanatory_vars = [key for key, val in dataset.variables.items() if val.dims == (dim,)]
if len(explanatory_vars) > 0:
sample_description = dataset[explanatory_vars].to_dataframe()
else:
sample_description = pd.DataFrame({"intercept": range(dataset.dims[dim])})
return sample_description
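
# A minimal usage sketch, assuming a made-up Dataset; a single per-observation
# variable yields a single-column sample description.
def _example_sample_description_from_xarray():
    ds = xr.Dataset({"batch": (("observations",), ["b0", "b0", "b1"])})
    return sample_description_from_xarray(dataset=ds, dim="observations")
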
def design_matrix_from_xarray(
dataset: xr.Dataset,
dim: str,
formula=None,
formula_key="formula",
as_categorical=True,
return_type="matrix",
):
"""
Create a design matrix from a given xarray.Dataset and model formula.
The formula will be chosen by the following order:
1) from the parameter 'formula'
    2) from dataset.attrs[formula_key]
The resulting design matrix as well as the formula and explanatory variables will be stored at the corresponding
'\*_key' keys in the returned dataset.
:param dim: name of the dimension for which the design matrix should be created.
The design matrix will be of shape (dim, "design_params").
:param dataset: xarray.Dataset containing explanatory variables.
:param formula: model formula as string, describing the relations of the explanatory variables.
If None, the formula is assumed to be stored inside 'dataset' as attribute
E.g. '~ 1 + batch + condition'
:param formula_key: index of the formula attribute inside 'dataset'.
Will store the formula as `dataset.attrs[formula_key]` inside the dataset
:param as_categorical: boolean or list of booleans corresponding to the columns in 'sample_description'
If True, all values in 'sample_description' will be treated as categorical values.
If list of booleans, each column will be changed to categorical if the corresponding value in 'as_categorical'
is True.
    Set to False if columns should not be changed.
:param return_type: type of the returned data; see design_matrix() for details
"""
if formula is None:
formula = dataset.attrs.get(formula_key)
if formula is None:
raise ValueError("formula could not be found")
sample_description = sample_description_from_xarray(dataset=dataset, dim=dim)
dmat = design_matrix(
sample_description=sample_description,
formula=formula,
as_categorical=as_categorical,
return_type=return_type
)
return dmat
def sample_description_from_anndata(dataset: anndata.AnnData):
"""
    Extract the sample description (the per-observation annotation `.obs`) from a given anndata.AnnData object.
    :param dataset: anndata.AnnData containing explanatory variables.
    :return: pd.DataFrame
"""
return dataset.obs
def design_matrix_from_anndata(
dataset: anndata.AnnData,
formula=None,
formula_key="formula",
as_categorical=True,
return_type="matrix",
):
r"""
    Create a design matrix from a given anndata.AnnData object and model formula.
The formula will be chosen by the following order:
1) from the parameter 'formula'
2) from dataset.uns[formula_key]
The resulting design matrix as well as the formula and explanatory variables will be stored at the corresponding
'\*_key' keys in the returned dataset.
:param dataset: anndata.AnnData containing explanatory variables.
:param formula: model formula as string, describing the relations of the explanatory variables.
If None, the formula is assumed to be stored inside 'dataset' as attribute
E.g. '~ 1 + batch + condition'
:param formula_key: index of the formula attribute inside 'dataset'.
Will store the formula as `dataset.uns[formula_key]` inside the dataset
:param as_categorical: boolean or list of booleans corresponding to the columns in 'sample_description'
If True, all values in 'sample_description' will be treated as categorical values.
If list of booleans, each column will be changed to categorical if the corresponding value in 'as_categorical'
is True.
    Set to False if columns should not be changed.
:param return_type: type of the returned data; see design_matrix() for details
"""
if formula is None:
formula = dataset.uns.get(formula_key)
if formula is None:
# could not find formula; try to construct it from explanatory variables
raise ValueError("formula could not be found")
sample_description = sample_description_from_anndata(dataset=dataset)
dmat = design_matrix(
sample_description=sample_description,
formula=formula,
as_categorical=as_categorical,
return_type=return_type
)
return dmat
def load_mtx_to_adata(path, cache=True):
"""
Loads mtx file, genes and barcodes from a given directory into an `anndata.AnnData` object
:param path: the folder containing the files
:param cache: Should a cache file be used for the AnnData object?
See `scanpy.api.read` for details.
:return: `anndata.AnnData` object
"""
import scanpy.api as sc
adata = sc.read(os.path.join(path, "matrix.mtx"), cache=cache).T
files = os.listdir(os.path.join(path))
for file in files:
if file.startswith("genes"):
delim = ","
if file.endswith("tsv"):
delim = "\t"
fpath = os.path.join(path, file)
logger.info("Reading %s as gene annotation...", fpath)
tbl = pd.read_csv(fpath, header=None, sep=delim)
tbl.columns = np.vectorize(lambda x: "col_%d" % x)(tbl.columns)
adata.var = tbl
# ad.var_names = tbl[1]
elif file.startswith("barcodes"):
delim = ","
if file.endswith("tsv"):
delim = "\t"
fpath = os.path.join(path, file)
logger.info("Reading %s as barcode file...", fpath)
            tbl = pd.read_csv(fpath, header=None, sep=delim)
# Copyright (c) 2020 Civic Knowledge. This file is licensed under the terms of the
# MIT license included in this distribution as LICENSE
import logging
import re
from collections import defaultdict, deque
from pathlib import Path
from time import time
import numpy as np
import pandas as pd
from synpums.util import *
_logger = logging.getLogger(__name__)
def sample_to_sum(N, df, col, weights):
"""Sample a number of records from a dataset, then return the smallest set of
rows at the front of the dataset where the weight sums to more than N"""
t = df.sample(n=N, weights=weights, replace=True)
# Get the number of records that sum to N.
arg = t[col].cumsum().sub(N).abs().astype(int).argmin()
return t.iloc[:arg + 1]
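
# A minimal usage sketch, assuming made-up household weights; the column names
# below are illustrative, not the real PUMS schema. Draws weighted rows until
# their weights accumulate to roughly N=100.
def _example_sample_to_sum():
    df = pd.DataFrame({'WGTP': [10, 25, 5, 60], 'b11016_001': [1, 1, 1, 1]})
    return sample_to_sum(100, df, 'WGTP', weights=df['WGTP'])
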
def rms(s):
"""Root mean square"""
return np.sqrt(np.sum(np.square(s)))
def vector_walk_callback(puma_task, tract_task, data, memo):
pass
def make_acs_target_df(acs, columns, geoid):
t = acs.loc[geoid]
    target_map = {c + '_m90': c for c in columns if "WGTP" not in c}
target_df = pd.DataFrame({
'est': t[target_map.values()],
'm90': t[target_map.keys()].rename(target_map)
})
target_df['est_min'] = target_df.est - target_df.m90
target_df['est_max'] = target_df.est + target_df.m90
target_df.loc[target_df.est_min < 0, 'est_min'] = 0
return target_df.astype('Int64')
def geoid_path(geoid):
from pathlib import Path
from geoid.acs import AcsGeoid
go = AcsGeoid.parse(geoid)
try:
return Path(f"{go.level}/{go.stusab}/{go.county:03d}/{str(go)}.csv")
except AttributeError:
return Path(f"{go.level}/{go.stusab}/{str(go)}.csv")
class AllocationTask(object):
"""Represents the allocation process to one tract"""
def __init__(self, region_geoid, puma_geoid, acs_ref, hh_ref, cache_dir):
self.region_geoid = region_geoid
self.puma_geoid = puma_geoid
self.acs_ref = acs_ref
self.hh_ref = hh_ref
self.cache_dir = cache_dir
self.sample_pop = None
self.sample_weights = None
self.unallocated_weights = None # Initialized to the puma weights, gets decremented
self.target_marginals = None
self.allocated_weights = None
self.household_count = None
self.population_count = None
self.gq_count = None
self.gq_cols = None
self.sex_age_cols = None
self.hh_size_cols = None
self.hh_race_type_cols = None
self.hh_eth_type_cols = None
self.hh_income_cols = None
self._init = False
self.running_allocated_marginals = None
# A version of the sample_pop constructed by map_cp, added as an instance var so
# the probabilities can be manipulated during the vector walk.
self.cp_df = None
self.cp_prob = None
@property
def row(self):
from geoid.acs import AcsGeoid
tract = AcsGeoid.parse(self.region_geoid)
return [tract.state, tract.stusab, tract.county, self.region_geoid, self.puma_geoid, str(self.acs_ref),
str(self.hh_ref)]
def init(self, use_sample_weights=False, puma_weights=None):
"""Load all of the data, just before running the allocation"""
if isinstance(self.hh_ref, pd.DataFrame):
hh_source = self.hh_ref
else:
hh_source = pd.read_csv(self.hh_ref, index_col='SERIALNO', low_memory=False) \
.drop(columns=['geoid'], errors='ignore').astype('Int64')
if isinstance(self.acs_ref, pd.DataFrame):
acs = self.acs_ref
else:
acs = pd.read_csv(self.acs_ref, index_col='geoid', low_memory=False)
# These are only for debugging.
#self.hh_source = hh_source
#self.tract_acs = acs
return self._do_init(hh_source, acs, puma_weights=puma_weights)
def _do_init(self, hh_source, acs, puma_weights=None):
self.serialno = hh_source.index
# Col 0 is the WGTP column
w_cols = [c for c in hh_source.columns if "WGTP" in c]
not_w_cols = [c for c in hh_source.columns if "WGTP" not in c]
# Not actually a sample pop --- populations are supposed to be unweighted
self.sample_pop = hh_source[['WGTP'] + not_w_cols].iloc[:, 1:].reset_index(drop=True).astype(int)
# Shouldn't this be:
# self.sample_pop = hh_source[not_w_cols].reset_index(drop=True).astype(int)
self.sample_weights = hh_source.iloc[:, 0].reset_index(drop=True).astype(int)
assert self.sample_pop.shape[0] == self.sample_weights.shape[0]
not_w_cols = [c for c in hh_source.columns if "WGTP" not in c]
self.target_marginals = make_acs_target_df(acs, not_w_cols, self.region_geoid)
self.household_count = acs.loc[self.region_geoid].b11016_001
self.population_count = acs.loc[self.region_geoid].b01003_001
self.gq_count = acs.loc[self.region_geoid].b26001_001
self.total_count = self.household_count + self.gq_count
self.allocated_weights = np.zeros(len(self.sample_pop))
self.unallocated_weights = puma_weights if puma_weights is not None else self.sample_weights.copy()
self.running_allocated_marginals = pd.Series(0, index=self.target_marginals.index)
# Sample pop, normalized to unit length to speed up cosine similarity
self.sample_pop_norm = vectors_normalize(self.sample_pop.values)
# Column sets
self.gq_cols = ['b26001_001']
self.sex_age_cols = [c for c in hh_source.columns if c.startswith('b01001')]
self.hh_size_cols = [c for c in hh_source.columns if c.startswith('b11016')]
p = re.compile(r'b11001[^hi]_')
self.hh_race_type_cols = [c for c in hh_source.columns if p.match(c)]
p = re.compile(r'b11001[hi]_')
self.hh_eth_type_cols = [c for c in hh_source.columns if p.match(c)]
p = re.compile(r'b19025')
self.hh_income_cols = [c for c in hh_source.columns if p.match(c)]
        # We will use this identity in the numpy version of step_schedule
# assert all((self.cp.index / 2).astype(int) == self['index'])
self.rng = np.random.default_rng()
self.make_cp(self.sample_pop)
self._init = True
return acs
def make_cp(self, sp):
"""Make a version of the sample population with two records for each
row, one the negative of the one before it. This is used to generate
rows that can be used in the vector walk."""
self.cp = pd.concat([sp, sp]).sort_index().reset_index()
self.cp.insert(1, 'sign', 1)
self.cp.insert(2, 'select_weight', 0)
self.cp.iloc[0::2, 1:] = self.cp.iloc[0::2, 1:] * -1 # flip sign on the marginal counts
self.update_cp()
return self.cp
def update_cp(self):
self.cp.loc[0::2, 'select_weight'] = self.allocated_weights.tolist()
self.cp.loc[1::2, 'select_weight'] = self.unallocated_weights.tolist()
def set_cp_prob(self, cp_prob):
pass
@property
def path(self):
return Path(self.cache_dir).joinpath(geoid_path(str(self.region_geoid))).resolve()
@property
def pums(self):
"""Return the PUMS household and personal records for this PUMA"""
from .pums import build_pums_dfp_dfh
from geoid.acs import Puma
puma = Puma.parse(self.puma_geoid)
dfp, dfh = build_pums_dfp_dfh(puma.stusab, year=2018, release=5)
return dfp, dfh
def get_saved_frame(self):
if self.path.exists():
return pd.read_csv(self.path.resolve(), low_memory=False)
else:
return None
@property
def results_frame(self):
return pd.DataFrame({
'geoid': self.region_geoid,
'serialno': self.serialno,
'weight': self.allocated_weights
})
def save_frame(self):
self.path.parent.mkdir(parents=True, exist_ok=True)
df = pd.DataFrame({
'serialno': self.serialno,
'weight': self.allocated_weights
})
df = df[df.weight > 0]
df.to_csv(self.path, index=False)
def load_frame(self):
df = pd.read_csv(self.path, low_memory=False)
self.init()
aw, _ = df.align(self.sample_weights, axis=0)
self.allocated_weights = df.set_index('serialno').reindex(self.serialno).fillna(0).values[:, 0]
def inc(self, rown, n=1):
if self.allocated_weights[rown] > 0 or n > 0:
self.allocated_weights[rown] += n # Increment the count column
self.running_allocated_marginals += n * self.sample_pop.iloc[rown]
@property
def allocated_pop(self):
return self.sample_pop.mul(self.allocated_weights, axis=0)
@property
def allocated_marginals(self):
t = self.allocated_pop.sum()
t.name = 'allocated_marginals'
return t
def calc_region_sum(self):
return self.allocated_weights.sum()
def column_diff(self, column):
return (self.target_marginals.est[column] - self.allocated_marginals[column])
@property
def target_diff(self):
return self.target_marginals.est - self.allocated_marginals
@property
def rel_target_diff(self):
return ((self.target_marginals.est - self.allocated_marginals) / self.target_marginals.est) \
.replace({np.inf: 0, -np.inf: 0})
@property
def running_target_diff(self):
return self.target_marginals.est - self.running_allocated_marginals
@property
def error_frame(self):
return self.target_marginals \
.join(self.allocated_marginals.to_frame('allocated')) \
.join(self.m90_error.to_frame('m_90')) \
.join(self.target_diff.to_frame('diff')) \
.join(self.rel_target_diff.to_frame('rel'))
@property
def total_error(self):
"""Magnitude of the error vector"""
return np.sqrt(np.sum(np.square(self.target_diff)))
@property
def running_total_error(self):
"""Magnitude of the error vector"""
return np.sqrt(np.sum(np.square(self.running_target_diff)))
@property
def m90_error(self):
"""Error that is relative to the m90 limits. Any value within the m90 limits is an error of 0"""
        # Where the allocated marginal is within the m90 range, the error is 0;
        # otherwise, return the amount by which the allocated marginal falls outside the m90 range
t = self.allocated_marginals - self.target_marginals.est
t[self.allocated_marginals.between(self.target_marginals.est_min, self.target_marginals.est_max)] = 0
t[t > self.target_marginals.m90] = t - self.target_marginals.m90
t[t < -1 * self.target_marginals.m90] = t + self.target_marginals.m90
return t
@property
def m90_total_error(self):
return np.sqrt(np.sum(np.square(self.m90_error)))
@property
def m90_rms_error(self):
"""RMS error of the m90 differences. Like m90 total error, but divides
by the number of marginal value variables"""
return np.sqrt(np.sum(np.square(self.m90_total_error)) / len(self.target_marginals))
# Equivalent to cosine similarity when the vectors are both normalized
def cosine_similarities(self):
        '''Calculate the cosine similarities for all of the sample population records
        to the normalized error vector'''
return self.sample_pop_norm.dot(vector_normalize(self.target_diff.values).T)
def sample_multicol(self, columns):
targets = self.target_marginals.est
frames = []
for col in columns:
target = targets.loc[col]
if target > 0:
t = self.sample_pop[self.sample_pop[col] > 0]
w = self.sample_weights[self.sample_pop[col] > 0]
if len(t) > 0 and w.sum() > 0:
frames.append(sample_to_sum(target, t, col, w))
if frames:
return pd.concat(frames)
else:
return None
def _pop_to_weights(self, pop):
'''Return weights by counting the records in a population'''
t = pop.copy()
t.insert(0, 'dummy', 1)
t = t.groupby(t.index).dummy.count()
t = t.align(self.sample_weights)[0].fillna(0).values
return t
def initialize_weights_set_sample(self, f=0.85):
"""Sample from the sample population one column at a time, in groups of
columns that describe exclusive measures ( a household can contribute to
only one marginal column) Then, resample the population to match the correct number of
households"""
assert self._init
if f == 0:
return
frames = [
self.sample_multicol(self.hh_race_type_cols + self.gq_cols),
self.sample_multicol(self.hh_eth_type_cols),
self.sample_multicol(self.sex_age_cols),
]
frames = [f for f in frames if f is not None]
if len(frames) == 0:
return
# An initial population, which is of the wrong size, so just
# convert it to weights
t = pd.concat(frames)
initial_weights = self._pop_to_weights(t)
# These use those weights to re-sample the population.
target_count = self.household_count + self.gq_count
# Sample some fraction less than the target count, so we can vector walk to the final value
target_count = int(target_count * f)
t = self.sample_pop.sample(target_count, weights=initial_weights, replace=True)
self.allocated_weights = self._pop_to_weights(t)
self.unallocated_weights -= self.allocated_weights
self.running_allocated_marginals = self.allocated_marginals
def _rake(self, f=1):
# Sort the columns by the error
cols = list(self.error_frame.sort_values('diff', ascending=False).index)
# cols = random.sample(list(self.sample_pop.columns), len(self.sample_pop.columns)):
for col in cols:
b = self.sample_pop[col].mul(self.allocated_weights, axis=0).sum()
if b != 0:
a = self.target_marginals.loc[col].replace({pd.NA: 0}).est
r = a / b * f
self.allocated_weights[self.sample_pop[col] > 0] *= r
self.allocated_weights = np.round(self.allocated_weights, 0)
def initialize_weights_raking(self, n_iter=5, initial_weights='sample'):
"""Set the allocated weights to an initial value by 1-D raking, adjusting the
weights to fit the target marginal value for each column. """
if initial_weights == 'sample':
assert self.allocated_weights.shape == self.sample_weights.shape
self.allocated_weights = self.sample_weights
else:
self.allocated_weights = np.ones(self.allocated_weights.shape)
for i in range(n_iter):
# Sort the columns by the error
cols = list(self.error_frame.sort_values('diff', ascending=False).index)
# cols = random.sample(list(self.sample_pop.columns), len(self.sample_pop.columns)):
for col in cols:
b = self.sample_pop[col].mul(self.allocated_weights, axis=0).sum()
if b != 0:
a = self.target_marginals.loc[col].replace({pd.NA: 0}).est
r = a / b
self.allocated_weights[self.sample_pop[col] > 0] *= r
self.allocated_weights = np.round(self.allocated_weights, 0)
try:
self.allocated_weights = self.allocated_weights.values
except AttributeError:
pass
def initialize_weights_sample(self):
"""Initialize the allocated weights proportional to the sample population weights,
adjusted to the total population. """
self.allocated_weights = (self.sample_weights / (self.sample_weights.sum())).multiply(
self.household_count).values.round(0).astype(float)
self.unallocated_weights -= self.allocated_weights
def step_schedule_np(self, i, N, te, td, step_size_max, step_size_min, reversal_rate):
""" Return the next set of samples to add or remove
:param i: Loop index
:param N: Max number of iterations
:param cp: Sample population, transformed by make_cp
:param te: Total error
:param td: Marginals difference vector
:param step_size_max: Maximum step size
:param step_size_min: Minimum step size
:param reversal_rate: Probability to allow an increase in error
:param p: Probability to select each sample row. If None, use column 2 of cp
:return: Records to add or remove from the allocated population
"""
# Compute change in each column of the error vector for adding or subtracting in
# each of the sample population records
# idx 0 is the index of the row in self.sample_pop
# idx 1 is the sign, 1 or -1
# idx 2 is the selection weight
# idx 3 and up are the census count columns
expanded_pop = self.cp.values.astype(int)
p = expanded_pop[:, 2]
# For each new error vector, compute total error ( via vector length). By
# removing the current total error, we get the change in total error for
# adding or removing each row. ( positive values are better )
total_errors = (np.sqrt(np.square(expanded_pop[:, 3:] + td).sum(axis=1))) - te
        # For error reducing records, sort them and then multiply
# the weights by a linear ramp, so the larger values of
# reduction get a relative preference over the lower reduction values.
gt0 = np.argwhere(total_errors > 0).flatten() # Error reducing records
srt = np.argsort(total_errors) # Sorted by error
reducing_error = srt[np.in1d(srt, gt0)][::-1] # get the intersection. These are index values into self.cp
# Selection probabilities, multiply by linear ramp to preference higher values.
reducing_p = ((p[reducing_error]) * np.linspace(1, 0, len(reducing_error)))
rps = np.sum(reducing_p)
if rps > 0:
reducing_p = np.nan_to_num(reducing_p / rps)
else:
reducing_p = []
increasing_error = np.argwhere(total_errors < 0).flatten() # Error increasing indexes
increasing_p = p[increasing_error].flatten().clip(min=0)
ips = np.sum(increasing_p)
if ips != 0:
increasing_p = np.nan_to_num(increasing_p / ips) # normalize to 1
else:
            increasing_p = []
# Min number of record to return in this step. The error-increasing records are in
# addition to this number
step_size = int((step_size_max - step_size_min) * ((N - i) / N) + step_size_min)
# Randomly select from each group of increasing or reducing indexes.
cc = []
if len(increasing_error) > 0 and ips > 0:
cc.append(self.rng.choice(increasing_error, int(step_size * reversal_rate), p=increasing_p))
if len(reducing_error) > 0 and rps > 0:
cc.append(self.rng.choice(reducing_error, int(step_size), p=reducing_p))
idx = np.concatenate(cc)
# Columns are : 'index', 'sign', 'delta_err'
delta_err = total_errors[idx].reshape(-1, 1).round(0).astype(int)
return np.hstack([expanded_pop[idx][:, 0:2], delta_err]) # Return the index and sign columns of cp
def _loop_asignment(self, ss):
for j, (idx, sgn, *_) in enumerate(ss):
idx = int(idx)
if (self.allocated_weights[idx] > 0 and sgn < 0) or \
(self.unallocated_weights[idx]>0 and sgn > 0) :
self.running_allocated_marginals += (sgn * self.sample_pop.iloc[idx])
self.allocated_weights[idx] += sgn # Increment the count column
self.unallocated_weights[idx] -= sgn
def _numpy_assignment(self, ss):
# The following code is the numpy equivalent of the loop version of
# assignment to the allocated marginals. It is about 20% faster than the loop
# This selection on ss is the equivalent to this if statement in the loop version:
# if self.allocated_weights[idx] > 0 or sgn > 0:
#
ss = ss[np.logical_or(
np.isin(ss[:, 0], np.nonzero(self.allocated_weights > 0)), # self.allocated_weights[idx] > 0
ss[:, 1] > 0) # sgn > 0
]
# Assign the steps from the step schedule into the allocated weights
if len(ss):
idx = ss[:, 0].astype(int)
sgn = ss[:, 1]
# Update all weights by the array of signs
self.allocated_weights[idx] += sgn
# Don't allow negative weights
self.allocated_weights[self.allocated_weights < 0] = 0
# Add in the signed sampled to the running marginal, to save the cost
# of re-calculating the marginals.
self.running_allocated_marginals += \
np.multiply(self.sample_pop.iloc[idx], sgn.reshape(ss.shape[0], -1)).sum()
def _vector_walk(self, N=2000, min_iter=750, target_error=0.03,
step_size_min=3, step_size_max=15, reversal_rate=.3,
max_ssm=250, cb=None, memo=None):
"""Allocate PUMS records to this object's region.
Args:
N:
min_iter:
target_error:
step_size_min:
step_size_max:
reversal_rate:
max_ssm:
"""
assert self._init
if target_error < 1:
target_error = self.household_count * target_error
min_allocation = None # allocated weights at last minimum
steps_since_min = 0
min_error = self.total_error
self.running_allocated_marginals = self.allocated_marginals
if cb:
# vector_walk_callback(puma_task, tract_task, data, memo):
cb(memo.get('puma_task'), self, None, memo)
for i in range(N):
td = self.running_target_diff.values.astype(int)
te = vector_length(td)
# The unallocated weights can be updated both internally and externally --
# the array can be shared among all tracts in the puma
self.update_cp()
if te < min_error:
min_error = te
min_allocation = self.allocated_weights
steps_since_min = 0
else:
steps_since_min += 1
min_error = min(te, min_error)
if (i > min_iter and te < target_error) or steps_since_min > max_ssm:
break
try:
ss = self.step_schedule_np(i, N, te, td,
step_size_max, step_size_min, reversal_rate)
self._loop_asignment(ss)
yield (i, te, min_error, steps_since_min, len(ss))
            except ValueError as e:
                # Usually b/c numpy choice() got an empty array
                print(e)
                raise
if min_allocation is not None:
self.allocated_weights = min_allocation
def vector_walk(self, N=2000, min_iter=750, target_error=0.03, step_size_min=3, step_size_max=10,
reversal_rate=.3, max_ssm=250, callback=None, memo=None,
stats = True):
"""Consider the target state and each household to be a vector. For each iteration
select a household vector with the best cosine similarity to the vector to the
target and add that household to the population. """
assert self._init
rows = []
ts = time()
errors = deque(maxlen=20)
errors.extend([self.total_error] * 20)
g = self._vector_walk(
N=N, min_iter=min_iter, target_error=target_error,
step_size_min=step_size_min, step_size_max=step_size_max,
reversal_rate=reversal_rate, max_ssm=max_ssm,
cb=callback, memo=memo)
if stats is not True:
list(g)
return []
else:
for i, te, min_error, steps_since_min, n_iter in g :
d = {'i': i, 'time': time() - ts, 'step_size': n_iter, 'error': te,
'target_error': target_error,
'total_error': te,
'size': np.sum(self.allocated_weights),
'ssm': steps_since_min,
'min_error': min_error,
'mean_error': np.mean(errors),
'std_error': np.std(errors),
'uw_sum': np.sum(self.unallocated_weights),
'total_count': self.total_count
}
rows.append(d)
errors.append(te)
if callback and i % 10 == 0:
# vector_walk_callback(puma_task, tract_task, data, memo):
callback(None, self, None, memo)
return rows
@classmethod
def get_us_tasks(cls, cache_dir, sl='tract', year=2018, release=5, limit=None, ignore_completed=True):
"""Return all of the tasks for all US states"""
from geoid.censusnames import stusab
tasks = []
for state in stusab.values():
state_tasks = cls.get_state_tasks(cache_dir, state, sl, year, release, limit, ignore_completed)
tasks.extend(state_tasks)
return tasks
@classmethod
def get_tasks(cls, cache_dir, state, sl='tract', year=2018, release=5,
limit=None, use_tqdm=False, ignore_completed=True):
if state.upper() == 'US':
return cls.get_us_tasks(cache_dir, sl, year, release, limit, use_tqdm, ignore_completed)
else:
return cls.get_state_tasks(cache_dir, state, sl, year, release, limit, ignore_completed)
@classmethod
def get_state_tasks(cls, cache_dir, state, sl='tract', year=2018, release=5,
limit=None, ignore_completed=True):
"""Fetch ( possibly download) the source data to generate allocation tasks,
and cache the data if a cache_dir is provided"""
from .acs import puma_tract_map
from synpums import build_acs, build_pums_households
from functools import partial
import pickle
_logger.info(f'Loading tasks for {state} from cache {cache_dir}')
cp = Path(cache_dir).joinpath('tasks', 'source', f"{state}-{year}-{release}/")
cp.mkdir(parents=True, exist_ok=True)
asc_p = cp.joinpath("acs.csv")
hh_p = cp.joinpath("households.csv")
tasks_p = cp.joinpath("tasks.pkl")
        if limit:
            from itertools import islice

            def limiter(g, *args, **kwargs):
                yield from islice(g, limit)
        else:
            def limiter(g, *args, **kwargs):
                yield from g
if tasks_p and tasks_p.exists():
with tasks_p.open('rb') as f:
_logger.debug(f"Returning cached tasks from {str(tasks_p)}")
return pickle.load(f)
# Cached ACS files
if asc_p and asc_p.exists():
tract_acs = pd.read_csv(asc_p, index_col='geoid', low_memory=False)
else:
tract_acs = build_acs(state, sl, year, release)
if asc_p:
tract_acs.to_csv(asc_p, index=True)
# Cached Households
if hh_p and hh_p.exists():
households = pd.read_csv(hh_p, index_col='SERIALNO', low_memory=False)
else:
households = build_pums_households(state, year=year, release=release)
if hh_p:
households.to_csv(hh_p, index=True)
hh = households.groupby('geoid')
hh_file_map = {}
for key, g in hh:
puma_p = cp.joinpath(f"pumas/{key}.csv")
puma_p.parent.mkdir(parents=True, exist_ok=True)
_logger.debug(f"Write puma file {str(puma_p)}")
g.to_csv(puma_p)
hh_file_map[key] = puma_p
pt_map = puma_tract_map()
tasks = []
for tract_geoid, targets in limiter(tract_acs.iterrows(), desc='Generate Tasks'):
try:
puma_geoid = pt_map[tract_geoid]
t = AllocationTask(tract_geoid, puma_geoid, asc_p, hh_file_map[puma_geoid], cache_dir)
if not t.path.exists() or ignore_completed is False:
tasks.append(t)
except Exception as e:
print("Error", tract_geoid, type(e), e)
if tasks_p:
with tasks_p.open('wb') as f:
_logger.debug(f"Write tasks file {str(tasks_p)}")
pickle.dump(tasks, f, pickle.HIGHEST_PROTOCOL)
return tasks
def run(self, *args, callback=None, memo=None, **kwargs):
self.init()
self.initialize_weights_sample()
rows = self.vector_walk(*args, callback=callback, memo=memo, **kwargs)
self.save_frame()
return rows
class PumaAllocator(object):
"""Simultaneously allocate all of the tracts in a pums, attempting to reduce the
error between the sum of the allocated weights and the PUMS weights"""
def __init__(self, puma_geoid, tasks, cache_dir, state, year=2018, release=5):
self.cache_dir = cache_dir
self.puma_geoid = puma_geoid
self.tasks = tasks
self.year = year
self.release = release
self.state = state
pums_files = [task.hh_ref for task in self.tasks]
assert all([e == pums_files[0] for e in pums_files])
self.pums_file = pums_files[0]
self._puma_target_marginals = None
self._puma_allocated_marginals = None
self._puma_max_weights = None
self._puma_allocated_weights = None
self._puma_unallocated_weights = None
self.pums = pd.read_csv(pums_files[0], low_memory=False)
self.weights = pd.DataFrame({
'allocated': 0,
'pums': self.pums.WGTP, # Original PUMS weights
'remaining': self.pums.WGTP # Remaining
})
self.prob = None
self.gq_cols = None
self.sex_age_cols = None
self.hh_size_cols = None
self.hh_race_type_cols = None
self.hh_eth_type_cols = None
self.hh_income_cols = None
self.replicate = 0
def init(self, init_method='sample'):
"""Initialize the weights of all of the tasks"""
from tqdm import tqdm
self.hh_ref = hh_source = pd.read_csv(self.tasks[0].hh_ref, index_col='SERIALNO', low_memory=False) \
.drop(columns=['geoid'], errors='ignore').astype('Int64')
self._puma_max_weights = hh_source.iloc[:, 0].reset_index(drop=True).astype(int)
self._puma_unallocated_weights = self._puma_max_weights.copy()
for task in tqdm(self.tasks):
task.init(puma_weights=self._puma_unallocated_weights)
if init_method == 'sample':
self.initialize_weights_sample(task)
if init_method == 'set':
task.initialize_weights_set_sample()
t0 = self.tasks[0] # Just to copy out some internal info.
self.gq_cols = t0.gq_cols
self.sex_age_cols = t0.sex_age_cols
self.hh_size_cols = t0.hh_size_cols
self.hh_race_type_cols = t0.hh_race_type_cols
self.hh_eth_type_cols = t0.hh_eth_type_cols
p = re.compile(r'b19025')
        self.hh_income_cols = [c for c in hh_source.columns if p.match(c)]
@classmethod
def get_tasks(cls, cache_dir, state, year=2018, release=5):
        tasks = AllocationTask.get_state_tasks(cache_dir, state, sl='tract', year=year, release=release)
puma_tasks = defaultdict(list)
for task in tasks:
puma_tasks[task.puma_geoid].append(task)
return puma_tasks
@classmethod
def get_allocators(cls, cache_dir, state, year=2018, release=5):
        tasks = AllocationTask.get_state_tasks(cache_dir, state, sl='tract', year=year, release=release)
puma_tasks = defaultdict(list)
for task in tasks:
puma_tasks[task.puma_geoid].append(task)
return [PumaAllocator(puma_geoid, tasks, cache_dir, state, year, release) for puma_geoid, tasks in
puma_tasks.items()]
def initialize_weights_sample(self, task, frac=.7):
"""Initialize the allocated weights proportional to the sample population weights,
adjusted to the total population. """
wf = self.weights_frame
assert wf.remaining.sum() != 0
wn1 = wf.remaining / wf.remaining.sum() # weights normalized to 1
task.allocated_weights = rand_round(wn1.multiply(task.household_count).values.astype(float))
task.unallocated_weights = np.clip(task.unallocated_weights-task.allocated_weights, a_min=0, a_max=None)
assert not any(task.unallocated_weights<0)
def vector_walk(self, N=1200, min_iter=5000, target_error=0.03, step_size_min=1,
step_size_max=10, reversal_rate=.3, max_ssm=150,
callback=None, memo=None):
"""Run a vector walk on all of the tracts tasks in this puma """
from itertools import cycle
rows = []
ts = time()
memo['puma_task'] = self
def make_vw(task):
return iter(task._vector_walk(
N=N, min_iter=min_iter, target_error=target_error,
step_size_min=step_size_min, step_size_max=step_size_max,
reversal_rate=reversal_rate, max_ssm=max_ssm,
cb=callback, memo=memo))
task_iters = [(task, make_vw(task)) for task in self.tasks]
stopped = set()
running = set([e[0] for e in task_iters])
memo['n_stopped'] = len(stopped)
memo['n_running'] = len(running)
memo['n_calls'] = 0
while True:
for task, task_iter in task_iters:
if task in running:
try:
i, te, min_error, steps_since_min, n_iter = next(task_iter)
memo['n_calls'] += 1
d = {'i': i, 'time': time() - ts, 'step_size': n_iter, 'error': te,
'target_error': target_error,
'size': np.sum(task.allocated_weights),
'ssm': steps_since_min,
'min_error': min_error,
'task': task
}
rows.append(d)
if callback and i % 10 == 0:
callback(self, task, d, memo)
except StopIteration:
stopped.add(task)
running.remove(task)
memo['n_stopped'] = len(stopped)
memo['n_running'] = len(running)
if len(running) == 0:
return rows
if callback:
# vector_walk_callback(puma_task, tract_task, data, memo):
callback(self, None, None, memo)
assert False # Should never get here.
def run(self, *args, callback=None, memo=None, **kwargs):
self.init(init_method='sample')
rows = self.vector_walk(*args, callback=callback, memo=memo, **kwargs)
self.save_frame()
return rows
def get_task(self, geoid):
for task in self.tasks:
if geoid == task.region_geoid:
return task
return None
def tune_puma_allocation(self):
"""Re-run all of the tasks in the puma, trying to reduce the discrepancy
between the """
task_iters = [(task, iter(task._vector_walk())) for task in self.tasks]
for task, task_iter in task_iters:
try:
task.cp_prob = self._update_probabilities()
row = next(task_iter)
print(task.region_geoid, self.rms_error, self.rms_weight_error, np.sum(task.cp_prob))
except StopIteration:
print(task.region_geoid, 'stopped')
@property
def weights_frame(self):
self.weights[
'allocated'] = self.puma_allocated_weights # np.sum(np.array([task.allocated_weights for task in self.tasks]), axis=0)
self.weights['remaining'] = self.weights.pums - self.weights.allocated
self.weights['dff'] = self.weights.allocated - self.weights.pums
self.weights['rdff'] = (self.weights.dff / self.weights.pums).fillna(0)
self.weights['p'] = self.weights.rdff
return self.weights
def _update_probabilities(self):
"""Update the running cp_probs, the probabilities for selecting each PUMS
household from the sample_pop, based on the error in weights for
the households at the Puma level"""
w = self.weights_frame
w['p_pos'] = - w.p.where(w.p < 0, 0)
w['p_neg'] = w.p.where(w.p > 0, 0)
self.prob = np.array(w[['p_neg', 'p_pos']].values.flat)
return self.prob
@property
def puma_target_marginals(self):
from .acs import build_acs
if self._puma_target_marginals is None:
_puma_marginals = build_acs(state=self.state, sl='puma', year=self.year, release=self.release)
cols = self.tasks[
0].target_marginals.index # [c for c in _puma_marginals.columns if c.startswith('b') and not c.endswith('_m90')]
self._puma_target_marginals = _puma_marginals.loc[self.puma_geoid][cols]
return self._puma_target_marginals
@property
def puma_allocated_marginals(self):
return self.allocated_marginals.sum()
@property
def allocated_marginals(self):
series = {task.region_geoid: task.allocated_marginals for task in self.tasks}
return pd.DataFrame(series).T
@property
def allocated_weights(self):
series = {task.region_geoid: task.allocated_weights for task in self.tasks}
return pd.DataFrame(series).T
@property
def puma_allocated_weights(self):
return self.allocated_weights.sum()
@property
def target_marginals(self):
series = {task.region_geoid: task.target_marginals.est for task in self.tasks}
        return pd.DataFrame(series)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
## Copyright 2015-2021 PyPSA Developers
## You can find the list of PyPSA Developers at
## https://pypsa.readthedocs.io/en/latest/developers.html
## PyPSA is released under the open source MIT License, see
## https://github.com/PyPSA/PyPSA/blob/master/LICENSE.txt
"""
Tools for fast Linear Problem file writing. This module contains
- io functions for writing out variables, constraints and objective
into a lp file.
- functions to create lp format based linear expression
- solver functions which read the lp file, run the problem and return the
solution
This module supports the linear optimal power flow calculation without using
pyomo (see module linopt.py)
"""
__author__ = "PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html"
__copyright__ = ("Copyright 2015-2021 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html, "
"MIT License")
from .descriptors import Dict
import pandas as pd
import os
import logging, re, io, subprocess
import numpy as np
from pandas import IndexSlice as idx
from importlib.util import find_spec
from distutils.version import LooseVersion
logger = logging.getLogger(__name__)
# =============================================================================
# Front end functions
# =============================================================================
def define_variables(n, lower, upper, name, attr='', axes=None, spec='', mask=None):
"""
Defines variable(s) for pypsa-network with given lower bound(s) and upper
bound(s). The variables are stored in the network object under n.vars with
    key of the variable name. If multiple variables are defined at once, at
least one of lower and upper has to be an array (including pandas) of
shape > (1,) or axes have to define the dimensions of the variables.
Parameters
----------
n : pypsa.Network
lower : pd.Series/pd.DataFrame/np.array/str/float
lower bound(s) for the variable(s)
upper : pd.Series/pd.DataFrame/np.array/str/float
upper bound(s) for the variable(s)
name : str
general name of the variable (or component which the variable is
referring to). The variable will then be stored under:
* n.vars[name].pnl if the variable is two-dimensional
* n.vars[name].df if the variable is one-dimensional
but can easily be accessed with :func:`get_var(n, name, attr)`
attr : str default ''
Specifying name of the variable, defines under which name the variable(s)
are stored in n.vars[name].pnl if two-dimensional or in n.vars[name].df
if one-dimensional
axes : pd.Index or tuple of pd.Index objects, default None
Specifies the axes and therefore the shape of the variables if bounds
are single strings or floats. This is helpful when multiple variables
have the same upper and lower bound.
mask: pd.DataFrame/np.array
Boolean mask with False values for variables which are skipped.
The shape of the mask has to match the shape the added variables.
Example
--------
Let's say we want to define a demand-side-managed load at each bus of
network n, which has a minimum of 0 and a maximum of 10. We then define
lower bound (lb) and upper bound (ub) and pass it to define_variables
>>> from pypsa.linopt import define_variables, get_var
>>> lb = pd.DataFrame(0, index=n.snapshots, columns=n.buses.index)
>>> ub = pd.DataFrame(10, index=n.snapshots, columns=n.buses.index)
>>> define_variables(n, lb, ub, 'DSM', 'variableload')
Now the variables can be accessed by :func:`pypsa.linopt.get_var` using
>>> variables = get_var(n, 'DSM', 'variableload')
    Note that this is useful for the `extra_functionality` argument.
"""
var = write_bound(n, lower, upper, axes, mask)
set_varref(n, var, name, attr, spec=spec)
return var
def define_binaries(n, axes, name, attr='', spec='', mask=None):
"""
Defines binary-variable(s) for pypsa-network. The variables are stored
in the network object under n.vars with key of the variable name.
For each entry for the pd.Series of pd.DataFrame spanned by the axes
argument the function defines a binary.
Parameters
----------
n : pypsa.Network
axes : pd.Index or tuple of pd.Index objects
Specifies the axes and therefore the shape of the variables.
name : str
general name of the variable (or component which the variable is
referring to). The variable will then be stored under:
* n.vars[name].pnl if the variable is two-dimensional
* n.vars[name].df if the variable is one-dimensional
attr : str default ''
Specifying name of the variable, defines under which name the variable(s)
are stored in n.vars[name].pnl if two-dimensional or in n.vars[name].df
if one-dimensional
mask: pd.DataFrame/np.array
Boolean mask with False values for variables which are skipped.
The shape of the mask has to match the shape given by axes.
See also
---------
define_variables
"""
var = write_binary(n, axes)
set_varref(n, var, name, attr, spec=spec)
return var
def define_constraints(n, lhs, sense, rhs, name, attr='', axes=None, spec='',
mask=None):
"""
Defines constraint(s) for pypsa-network with given left hand side (lhs),
sense and right hand side (rhs). The constraints are stored in the network
    object under n.cons with key of the constraint name. If multiple constraints
    are defined at once, using only np.arrays, then the axes argument can be used
for defining the axes for the constraints (this is especially recommended
for time-dependent constraints). If one of lhs, sense and rhs is a
pd.Series/pd.DataFrame the axes argument is not necessary.
Parameters
----------
n: pypsa.Network
lhs: pd.Series/pd.DataFrame/np.array/str/float
left hand side of the constraint(s), created with
:func:`pypsa.linot.linexpr`.
sense: pd.Series/pd.DataFrame/np.array/str/float
sense(s) of the constraint(s)
rhs: pd.Series/pd.DataFrame/np.array/str/float
right hand side of the constraint(s), must only contain pure constants,
no variables
name: str
general name of the constraint (or component which the constraint is
referring to). The constraint will then be stored under:
* n.cons[name].pnl if the constraint is two-dimensional
* n.cons[name].df if the constraint is one-dimensional
attr: str default ''
Specifying name of the constraint, defines under which name the
constraint(s) are stored in n.cons[name].pnl if two-dimensional or in
n.cons[name].df if one-dimensional
axes: pd.Index or tuple of pd.Index objects, default None
Specifies the axes if all of lhs, sense and rhs are np.arrays or single
strings or floats.
mask: pd.DataFrame/np.array
Boolean mask with False values for constraints which are skipped.
The shape of the mask has to match the shape of the array that come out
when combining lhs, sense and rhs.
Example
--------
    Let's say we want to constrain all gas generators to a maximum of 100 MWh
    during the first 10 snapshots. We first get all operational variables
    for this subset and constrain their sum to be less than or equal to 100.
>>> from pypsa.linopt import get_var, linexpr, define_constraints
>>> gas_i = n.generators.query('carrier == "Natural Gas"').index
>>> gas_vars = get_var(n, 'Generator', 'p').loc[n.snapshots[:10], gas_i]
>>> lhs = linexpr((1, gas_vars)).sum().sum()
    >>> define_constraints(n, lhs, '<=', 100, 'Generator', 'gas_power_limit')
Now the constraint references can be accessed by
:func:`pypsa.linopt.get_con` using
    >>> cons = get_con(n, 'Generator', 'gas_power_limit')
Under the hood they are stored in n.cons.Generator.pnl.gas_power_limit.
For retrieving their shadow prices add the general name of the constraint
to the keep_shadowprices argument.
Note that this is useful for the `extra_functionality` argument.
"""
con = write_constraint(n, lhs, sense, rhs, axes, mask)
set_conref(n, con, name, attr, spec=spec)
return con
# =============================================================================
# writing functions
# =============================================================================
def _get_handlers(axes, *maybearrays):
axes = [axes] if isinstance(axes, pd.Index) else axes
if axes is None:
axes, shape = broadcasted_axes(*maybearrays)
else:
shape = tuple(map(len, axes))
size = np.prod(shape)
return axes, shape, size
def write_bound(n, lower, upper, axes=None, mask=None):
"""
    Writer function for writing out multiple variables at a time. If lower and
    upper are floats, axes must be passed as a tuple of (index, columns)
    or (index,) to create variables that all share the same upper and lower bounds.
Return a series or frame with variable references.
"""
axes, shape, size = _get_handlers(axes, lower, upper)
if not size: return pd.Series(dtype=float)
n._xCounter += size
variables = np.arange(n._xCounter - size, n._xCounter).reshape(shape)
lower, upper = _str_array(lower), _str_array(upper)
exprs = lower + ' <= x' + _str_array(variables, True) + ' <= '+ upper + '\n'
if mask is not None:
exprs = np.where(mask, exprs, '')
variables = np.where(mask, variables, -1)
n.bounds_f.write(join_exprs(exprs))
return to_pandas(variables, *axes)
def write_constraint(n, lhs, sense, rhs, axes=None, mask=None):
"""
Writer function for writing out multiple constraints to the corresponding
    constraints file. If lhs, sense and rhs are numpy.ndarrays, axes must not be
    None but a tuple of (index, columns) or (index,).
Return a series or frame with constraint references.
"""
axes, shape, size = _get_handlers(axes, lhs, sense, rhs)
if not size: return pd.Series()
n._cCounter += size
cons = np.arange(n._cCounter - size, n._cCounter).reshape(shape)
if isinstance(sense, str):
sense = '=' if sense == '==' else sense
lhs, sense, rhs = _str_array(lhs), _str_array(sense), _str_array(rhs)
exprs = 'c' + _str_array(cons, True) + ':\n' + lhs + sense + ' ' + rhs + '\n\n'
if mask is not None:
exprs = np.where(mask, exprs, '')
cons = np.where(mask, cons, -1)
n.constraints_f.write(join_exprs(exprs))
return to_pandas(cons, *axes)
def write_binary(n, axes, mask=None):
"""
Writer function for writing out multiple binary-variables at a time.
According to the axes it writes out binaries for each entry the pd.Series
or pd.DataFrame spanned by axes. Returns a series or frame with variable
references.
"""
axes, shape, size = _get_handlers(axes)
n._xCounter += size
variables = np.arange(n._xCounter - size, n._xCounter).reshape(shape)
exprs = 'x' + _str_array(variables, True) + '\n'
if mask is not None:
exprs = np.where(mask, exprs, '')
variables = np.where(mask, variables, -1)
n.binaries_f.write(join_exprs(exprs))
return to_pandas(variables, *axes)
def write_objective(n, terms):
"""
Writer function for writing out one or multiple objective terms.
Parameters
----------
n : pypsa.Network
terms : str/numpy.array/pandas.Series/pandas.DataFrame
String or array of strings which represent new objective terms, built
with :func:`linexpr`
"""
n.objective_f.write(join_exprs(terms))
# =============================================================================
# helpers, helper functions
# =============================================================================
def broadcasted_axes(*dfs):
"""
Helper function which, from a collection of arrays, series, frames and other
values, retrieves the axes of series and frames which result from
broadcasting operations. It checks whether index and columns of given
    series and frames, respectively, are aligned. Using this function allows one
to subsequently use pure numpy operations and keep the axes in the
background.
"""
axes = []
shape = (1,)
if set(map(type, dfs)) == {tuple}:
dfs = sum(dfs, ())
for df in dfs:
shape = np.broadcast_shapes(shape, np.asarray(df).shape)
if isinstance(df, (pd.Series, pd.DataFrame)):
if len(axes):
assert (axes[-1] == df.axes[-1]).all(), ('Series or DataFrames '
'are not aligned. Please make sure that all indexes and '
'columns of Series and DataFrames going into the linear '
'expression are equally sorted.')
axes = df.axes if len(df.axes) > len(axes) else axes
return axes, shape
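
# A minimal illustration, assuming made-up inputs: combining a scalar, an
# aligned Series and a DataFrame returns the DataFrame's axes together with the
# broadcast shape.
def _example_broadcasted_axes():
    s = pd.Series([1.0, 2.0], index=['a', 'b'])
    df = pd.DataFrame(1.0, index=pd.RangeIndex(3), columns=['a', 'b'])
    axes, shape = broadcasted_axes(3, s, df)
    return axes, shape  # axes == df.axes, shape == (3, 2)
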
def align_with_static_component(n, c, attr):
"""
Alignment of time-dependent variables with static components. If c is a
pypsa.component name, it will sort the columns of the variable according
to the static component.
"""
if c in n.all_components and (c, attr) in n.variables.index:
if not n.variables.pnl[c, attr]: return
if len(n.vars[c].pnl[attr].columns) != len(n.df(c).index): return
n.vars[c].pnl[attr] = n.vars[c].pnl[attr].reindex(columns=n.df(c).index)
def linexpr(*tuples, as_pandas=True, return_axes=False):
"""
Elementwise concatenation of tuples in the form (coefficient, variables).
Coefficient and variables can be arrays, series or frames. Per default
returns a pandas.Series or pandas.DataFrame of strings. If return_axes
is set to True the return value is split into values and axes, where values
are the numpy.array and axes a tuple containing index and column if present.
Parameters
----------
tuples: tuple of tuples
Each tuple must of the form (coeff, var), where
* coeff is a numerical value, or a numerical array, series, frame
* var is a str or a array, series, frame of variable strings
as_pandas : bool, default True
Whether to return to resulting array as a series, if 1-dimensional, or
a frame, if 2-dimensional. Supersedes return_axes argument.
return_axes: Boolean, default False
Whether to return index and column (if existent)
Example
-------
Initialize coefficients and variables
>>> coeff1 = 1
>>> var1 = pd.Series(['a1', 'a2', 'a3'])
>>> coeff2 = pd.Series([-0.5, -0.3, -1])
>>> var2 = pd.Series(['b1', 'b2', 'b3'])
Create the linear expression strings
>>> linexpr((coeff1, var1), (coeff2, var2))
0 +1.0 a1 -0.5 b1
1 +1.0 a2 -0.3 b2
2 +1.0 a3 -1.0 b3
dtype: object
For a further step the resulting frame can be used as the lhs of
:func:`pypsa.linopt.define_constraints`
For retrieving only the values:
>>> linexpr((coeff1, var1), (coeff2, var2), as_pandas=False)
array(['+1.0 a1 -0.5 b1', '+1.0 a2 -0.3 b2', '+1.0 a3 -1.0 b3'], dtype=object)
"""
axes, shape = broadcasted_axes(*tuples)
expr = np.repeat('', np.prod(shape)).reshape(shape).astype(object)
if np.prod(shape):
for coeff, var in tuples:
expr = expr + _str_array(coeff) + ' x' + _str_array(var, True) + '\n'
if isinstance(expr, np.ndarray):
isna = np.isnan(coeff) | np.isnan(var) | (var == -1)
expr = np.where(isna, '', expr)
if return_axes:
return (expr, *axes)
if as_pandas:
return to_pandas(expr, *axes)
return expr
def to_pandas(array, *axes):
"""
Convert a numpy array to pandas.Series if 1-dimensional or to a
pandas.DataFrame if 2-dimensional. Provide index and columns if needed
"""
return pd.Series(array, *axes) if array.ndim == 1 else pd.DataFrame(array, *axes)
_to_float_str = lambda f: '%+f'%f
_v_to_float_str = np.vectorize(_to_float_str, otypes=[object])
_to_int_str = lambda d: '%d'%d
_v_to_int_str = np.vectorize(_to_int_str, otypes=[object])
def _str_array(array, integer_string=False):
if isinstance(array, (float, int)):
if integer_string:
return _to_int_str(array)
return _to_float_str(array)
array = np.asarray(array)
if array.dtype.type == np.str_:
array = np.asarray(array, dtype=object)
if array.dtype < str and array.size:
if integer_string:
array = np.nan_to_num(array, False, -1)
return _v_to_int_str(array)
return _v_to_float_str(array)
else:
return array
def join_exprs(df):
"""
Helper function to join arrays, series or frames of strings together.
"""
return ''.join(np.asarray(df).flatten())
# =============================================================================
# references to vars and cons, rewrite this part to not store every reference
# =============================================================================
def _add_reference(ref_dict, df, attr, pnl=True):
if pnl:
if attr in ref_dict.pnl:
ref_dict.pnl[attr][df.columns] = df
else:
ref_dict.pnl[attr] = df
else:
if attr in ref_dict.df:
ref_dict.df = pd.concat([ref_dict.df, df.to_frame(attr)])
else:
ref_dict.df[attr] = df
def set_varref(n, variables, c, attr, spec=''):
"""
Sets variable references to the network.
One-dimensional variable references will be collected at `n.vars[c].df`,
    two-dimensional variables in `n.vars[c].pnl`. For example:
* nominal capacity variables for generators are stored in
`n.vars.Generator.df.p_nom`
* operational variables for generators are stored in
`n.vars.Generator.pnl.p`
"""
if not variables.empty:
pnl = variables.ndim == 2
if c not in n.variables.index:
n.vars[c] = Dict(df=pd.DataFrame(), pnl=Dict())
if ((c, attr) in n.variables.index) and (spec != ''):
n.variables.at[idx[c, attr], 'specification'] += ', ' + spec
else:
n.variables.loc[idx[c, attr], :] = [pnl, spec]
_add_reference(n.vars[c], variables, attr, pnl=pnl)
def set_conref(n, constraints, c, attr, spec=''):
"""
Sets constraint references to the network.
One-dimensional constraint references will be collected at `n.cons[c].df`,
two-dimensional in `n.cons[c].pnl`
For example:
* constraints for nominal capacity variables for generators are stored in
`n.cons.Generator.df.mu_upper`
* operational capacity limits for generators are stored in
`n.cons.Generator.pnl.mu_upper`
"""
if not constraints.empty:
pnl = constraints.ndim == 2
if c not in n.constraints.index:
n.cons[c] = Dict(df=pd.DataFrame(), pnl=Dict())
if ((c, attr) in n.constraints.index) and (spec != ''):
n.constraints.at[idx[c, attr], 'specification'] += ', ' + spec
else:
n.constraints.loc[idx[c, attr], :] = [pnl, spec]
_add_reference(n.cons[c], constraints, attr, pnl=pnl)
def get_var(n, c, attr, pop=False):
"""
    Retrieves variable references for a given static or time-dependent
    attribute of a given component. The function looks into n.variables to
    detect whether the variable is time-dependent or static.
Parameters
----------
n : pypsa.Network
c : str
        component name to which the variable belongs
    attr: str
        attribute name of the variable
Example
-------
>>> get_var(n, 'Generator', 'p')
"""
vvars = n.vars[c].pnl if n.variables.pnl[c, attr] else n.vars[c].df
return vvars.pop(attr) if pop else vvars[attr]
def get_con(n, c, attr, pop=False):
"""
    Retrieves constraint references for a given static or time-dependent
    attribute of a given component.
Parameters
----------
n : pypsa.Network
c : str
component name to which the constraint belongs
attr: str
attribute name of the constraints
Example
-------
get_con(n, 'Generator', 'mu_upper')
"""
cons = n.cons[c].pnl if n.constraints.pnl[c, attr] else n.cons[c].df
return cons.pop(attr) if pop else cons[attr]
def get_sol(n, name, attr=''):
"""
Retrieves solution for a given variable. Note that a lookup of all stored
solutions is given in n.solutions.
Parameters
----------
n : pypsa.Network
    name : str
general variable name (or component name if variable is attached to a
component)
attr: str
attribute name of the variable
Example
-------
    get_sol(n, 'Generator', 'p')
"""
pnl = n.solutions.at[(name, attr), 'pnl']
if n.solutions.at[(name, attr), 'in_comp']:
return n.pnl(name)[attr] if pnl else n.df(name)[attr + '_opt']
else:
return n.sols[name].pnl[attr] if pnl else n.sols[name].df[attr]
def get_dual(n, name, attr=''):
"""
Retrieves shadow price for a given constraint. Note that for retrieving
shadow prices of a custom constraint, its name has to be passed to
`keep_references` in the lopf, or `keep_references` has to be set to True.
Note that a lookup of all stored shadow prices is given in n.dualvalues.
Parameters
----------
n : pypsa.Network
    name : str
        general constraint name (or component name if the constraint is
        attached to a component)
attr: str
attribute name of the constraints
Example
-------
get_dual(n, 'Generator', 'mu_upper')
"""
pnl = n.dualvalues.at[(name, attr), 'pnl']
if n.dualvalues.at[(name, attr), 'in_comp']:
return n.pnl(name)[attr] if pnl else n.df(name)[attr]
else:
return n.duals[name].pnl[attr] if pnl else n.duals[name].df[attr]
# =============================================================================
# solvers
# =============================================================================
def set_int_index(ser):
ser.index = ser.index.str[1:].astype(int)
return ser
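# Illustrative sketch: solver solution files label variables and constraints
# like 'x1', 'x2', ... and 'c1', 'c2', ...; set_int_index strips the leading
# letter so the series aligns with the integer labels used internally.
# >>> set_int_index(pd.Series([0.5, 1.5], index=['x1', 'x2']))
# 1    0.5
# 2    1.5
# dtype: float64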
def run_and_read_highs(n, problem_fn, solution_fn, solver_logfile,
solver_options={}, warmstart=None, store_basis=True):
"""
Highs solver function. Reads a linear problem file and passes it to the highs
solver. If the solution is feasible the function returns the objective,
solution and dual constraint variables. Highs must be installed for usage.
Documentation: https://www.maths.ed.ac.uk/hall/HiGHS/
Installation
-------------
The script might only work for version HiGHS 1.1.1. Installation steps::
sudo apt-get install cmake # if not installed
git clone [email protected]:ERGO-Code/HiGHS.git
cd HiGHS
git checkout 95342daa73543cc21e5b27db3e0fbf7330007541 # moves to HiGHS 1.1.1
mkdir build
cd build
cmake ..
make
ctest
Then in .bashrc add paths of executables and library ::
export PATH="${PATH}:/foo/HiGHS/build/bin"
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/foo/HiGHS/build/lib"
source .bashrc
Now when typing ``highs`` in the terminal you should see something like ::
Running HiGHS 1.1.1 [date: 2021-11-14, git hash: 95342daa]
Architecture
-------------
    The function builds and executes (via subprocess.Popen) the terminal
    commands of the solver. This means the command can also be executed in your
    command window/terminal if HiGHS is installed; executing the commands on
    your local terminal helps to identify the raw outputs that are useful for
    developing the interface further.
    Everything below the "process = ..." line only reads and saves the outputs
    generated by the HiGHS solver. These parts are solver specific and
    depend on the solver output.
Solver options
---------------
    Solver options are read 1) from the command line and 2) from an options file
    1) An example list of solver options that can be passed on the command line:
Examples:
--model_file arg File of model to solve.
--presolve arg Presolve: "choose" by default - "on"/"off" are alternatives.
--solver arg Solver: "choose" by default - "simplex"/"ipm" are alternatives.
--parallel arg Parallel solve: "choose" by default - "on"/"off" are alternatives.
--time_limit arg Run time limit (double).
--options_file arg File containing HiGHS options.
-h, --help Print help.
2) The options_file.txt gives some more options, see a full list here:
https://www.maths.ed.ac.uk/hall/HiGHS/HighsOptions.set
By default, we insert a couple of options for the ipm solver. The dictionary
can be overwritten by simply giving the new values. For instance, you could
write a dictionary replacing some of the default values or adding new options:
```
solver_options = {
name: highs,
method: ipm,
parallel: "on",
<option_name>: <value>,
}
```
    Note that <option_name> and <value> must follow the naming convention
    of HiGHS. Some options exist that are not documented; check the GitHub file:
https://github.com/ERGO-Code/HiGHS/blob/master/src/lp_data/HighsOptions.h
Output
------
status : string,
"ok" or "warning"
termination_condition : string,
Contains "optimal", "infeasible",
variables_sol : series
constraints_dual : series
objective : float
"""
logger.warning("The HiGHS solver can potentially solve towards variables that slightly deviate from Gurobi,cbc,glpk")
options_fn = "highs_options.txt"
default_dict = {
"method": "ipm",
"primal_feasibility_tolerance": 1e-04,
"dual_feasibility_tolerance": 1e-05,
"ipm_optimality_tolerance": 1e-6,
"presolve": "on",
"run_crossover": True,
"parallel": "off",
"threads": 4,
"solution_file": solution_fn,
"write_solution_to_file": True,
"write_solution_style": 1,
"log_to_console": True,
}
# update default_dict through solver_options and write to file
default_dict.update(solver_options)
method = default_dict.pop("method", "ipm")
logger.info(f"Options: \"{default_dict}\". List of options: https://www.maths.ed.ac.uk/hall/HiGHS/HighsOptions.set")
f1 = open(options_fn, "w")
f1.write('\n'.join([f"{k} = {v}" for k, v in default_dict.items()]))
f1.close()
# write (terminal) commands
command = f"highs --model_file {problem_fn} "
if warmstart:
logger.warning("Warmstart, not available in HiGHS. Will be ignored.")
command += f"--solver {method} --options_file {options_fn}"
logger.info(f"Solver command: \"{command}\"")
# execute command and store command window output
process = subprocess.Popen(
command.split(' '),
stdout=subprocess.PIPE,
universal_newlines=True
)
def read_until_break():
# Function that reads line by line the command window
while True:
out = process.stdout.readline(1)
            if out == '' and process.poll() is not None:
break
if out != '':
yield out
# converts stdout (standard terminal output) to pandas dataframe
log = io.StringIO(''.join(read_until_break())[:])
log = pd.read_csv(log, sep=':', index_col=0, header=None)[1].squeeze()
if solver_logfile is not None:
log.to_csv(solver_logfile, sep="\t")
log.index = log.index.str.strip()
os.remove(options_fn)
# read out termination_condition from `info`
model_status = log["Model status"].strip().lower()
if "optimal" in model_status:
status = "ok"
termination_condition = model_status
elif "infeasible" in model_status:
status = "warning"
termination_condition = model_status
else:
status = 'warning'
termination_condition = model_status
objective = float(log["Objective value"])
# read out solution file (.sol)
f = open(solution_fn, "rb")
trimed_sol_fn = re.sub(rb'\*\*\s+', b'', f.read())
f.close()
sol = pd.read_csv(io.BytesIO(trimed_sol_fn), header=[1], sep=r'\s+')
row_no = sol[sol["Index"] == 'Rows'].index[0]
sol = sol.drop(row_no+1) # Removes header line after "Rows"
sol_rows = sol[(sol.index > row_no)]
sol_cols = sol[(sol.index < row_no)].set_index("Name").pipe(set_int_index)
variables_sol = pd.to_numeric(sol_cols["Primal"], errors="raise")
constraints_dual = pd.to_numeric(sol_rows["Dual"], errors="raise").reset_index(drop=True)
constraints_dual.index += 1
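    # As parsed here, the HiGHS .sol file lists the model's columns (variables)
    # first and its rows (constraints) after a 'Rows' marker line; the frame is
    # therefore split at `row_no` above into primal values and dual values.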
return (status, termination_condition, variables_sol,
constraints_dual, objective)
def run_and_read_cbc(n, problem_fn, solution_fn, solver_logfile,
solver_options, warmstart=None, store_basis=True):
"""
Solving function. Reads the linear problem file and passes it to the cbc
solver. If the solution is successful it returns variable solutions and
constraint dual values.
For more information on the solver options, run 'cbc' in your shell
"""
with open(problem_fn, 'rb') as f:
        for line in f.readlines():
            decoded = line.decode('utf-8')
            assert "> " not in decoded, "'>' must be changed to '>='"
            assert "< " not in decoded, "'<' must be changed to '<='"
#printingOptions is about what goes in solution file
command = f"cbc -printingOptions all -import {problem_fn} "
if warmstart:
command += f'-basisI {warmstart} '
if (solver_options is not None) and (solver_options != {}):
command += solver_options
command += f"-solve -solu {solution_fn} "
if store_basis:
n.basis_fn = solution_fn.replace('.sol', '.bas')
command += f'-basisO {n.basis_fn} '
if not os.path.exists(solution_fn):
os.mknod(solution_fn)
log = open(solver_logfile, 'w') if solver_logfile is not None else subprocess.PIPE
result = subprocess.Popen(command.split(' '), stdout=log)
result.wait()
with open(solution_fn, "r") as f:
data = f.readline()
if data.startswith("Optimal - objective value"):
status = "ok"
termination_condition = "optimal"
objective = float(data[len("Optimal - objective value "):])
elif "Infeasible" in data:
status = "warning"
termination_condition = "infeasible"
else:
status = 'warning'
termination_condition = "other"
if termination_condition != "optimal":
return status, termination_condition, None, None, None
f = open(solution_fn,"rb")
trimed_sol_fn = re.sub(rb'\*\*\s+', b'', f.read())
f.close()
sol = pd.read_csv(io.BytesIO(trimed_sol_fn), header=None, skiprows=[0],
sep=r'\s+', usecols=[1,2,3], index_col=0)
variables_b = sol.index.str[0] == 'x'
variables_sol = sol[variables_b][2].pipe(set_int_index)
constraints_dual = sol[~variables_b][3].pipe(set_int_index)
return (status, termination_condition, variables_sol,
constraints_dual, objective)
def run_and_read_glpk(n, problem_fn, solution_fn, solver_logfile,
solver_options, warmstart=None, store_basis=True):
"""
Solving function. Reads the linear problem file and passes it to the glpk
solver. If the solution is successful it returns variable solutions and
constraint dual values.
For more information on the glpk solver options:
https://kam.mff.cuni.cz/~elias/glpk.pdf
"""
# TODO use --nopresol argument for non-optimal solution output
command = (f"glpsol --lp {problem_fn} --output {solution_fn}")
if solver_logfile is not None:
command += f' --log {solver_logfile}'
if warmstart:
command += f' --ini {warmstart}'
if store_basis:
n.basis_fn = solution_fn.replace('.sol', '.bas')
command += f' -w {n.basis_fn}'
if (solver_options is not None) and (solver_options != {}):
command += solver_options
result = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
result.wait()
f = open(solution_fn)
def read_until_break(f):
linebreak = False
while not linebreak:
line = f.readline()
linebreak = line == '\n'
yield line
info = io.StringIO(''.join(read_until_break(f))[:-2])
info = pd.read_csv(info, sep=':', index_col=0, header=None)[1]
termination_condition = info.Status.lower().strip()
objective = float(re.sub(r'[^0-9\.\+\-e]+', '', info.Objective))
if termination_condition in ["optimal","integer optimal"]:
status = "ok"
termination_condition = "optimal"
elif termination_condition == "undefined":
status = "warning"
termination_condition = "infeasible"
else:
status = "warning"
if termination_condition != 'optimal':
return status, termination_condition, None, None, None
duals = io.StringIO(''.join(read_until_break(f))[:-2])
duals = pd.read_fwf(duals)[1:].set_index('Row name')
if 'Marginal' in duals:
constraints_dual = | pd.to_numeric(duals['Marginal'], 'coerce') | pandas.to_numeric |
import math
import pandas as pd
import os
from abc import ABC, abstractclassmethod
class Base(ABC):
'''Abstract class'''
def __init__(self, formula, iter = 8, dec_places=4):
self.formula = lambda x: eval(formula)
self.a = int(input('Value of A = '))
self.b = int(input('Value of B = '))
self.path = os.getcwd()
self.iter = iter
self.dec_places = dec_places
self.i = 0
self.df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
plt.style.use('fivethirtyeight')
import warnings
warnings.filterwarnings("ignore")
import os
from stockscraperalpha import getStockInfo
from functions import SMA, EMA,DEMA,MACD,RSI
def show_menu():
return """[1]change stock
[2]plot
"""
def getInitialInfo(stockname):
if os.path.isfile(f"StockTickers/{stockname}"):
return pd.read_csv(f"StockTickers/{stockname}",index_col='Date')
else:
return getStockInfo(stockname)
def addDataFrameTI(data_frame):
data_frame['SMA-13'] = SMA(data_frame,period=13)
data_frame['SMA-50'] = SMA(data_frame,period=50)
data_frame['SMA-120'] = SMA(data_frame,period=120)
data_frame['EMA-12'] = EMA(data_frame,period=12)
data_frame['EMA-26'] = EMA(data_frame,period=26)
data_frame['EMA-120'] = EMA(data_frame,period=120)
temp = MACD(data_frame)
data_frame['MACD'] = temp['MACD'].values
data_frame['Signal'] = temp['Signal'].values
data_frame['RSI'] = RSI(data_frame)['RSI'].values
data_frame['DEMA-50'] = DEMA(data_frame,period=50)
data_frame['DEMA-180'] = DEMA(data_frame,period=180)
def cleanDataFrame(data_frame,period=20):
indexes_to_keep = [i for i in range(period+180)]
temp_data = data_frame.take(list(indexes_to_keep))
temp_data = temp_data.iloc[::-1]
addDataFrameTI(temp_data)
indexes_to_drop = [i for i in range(180)]
indexes_to_keep = set(range(temp_data.shape[0])) - set(indexes_to_drop)
temp_data = temp_data.take(list(indexes_to_keep))
return temp_data
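# Note: cleanDataFrame keeps `period` + 180 rows, computes the indicators and
# then drops the oldest 180 rows again; those extra rows act as a warm-up
# window so that the longest look-back indicators (SMA/EMA-120, DEMA-180)
# already hold real values on the rows that are kept.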
def plotInfo(data):
plt.figure(figsize=(12.2,6.4))
if data['Buy'].notnull().values.any():
plt.scatter(data.index, data['Buy'],color='green',label='Buy Signal',marker='^',alpha=1)
if data['Sell'].notnull().values.any():
plt.scatter(data.index, data['Sell'],color='red',label='Sell Signal',marker='v',alpha=1)
plt.plot(data['SMA-120'],label='SMA_long',alpha=0.35)
plt.plot(data['EMA-120'],label='EMA_long',alpha=0.35)
plt.plot(data['Close'],label='Close Price',alpha=0.35)
plt.title('Close Price Buy and Sell Signals')
plt.ylabel('USD Price ($)',fontsize=18)
plt.xlabel('Date',fontsize=18)
plt.legend(loc='upper left')
plt.show()
def buy_sell(data):
buy_list = []
sell_list = []
flag = 1
signal = 0
for i in range(len(data)):
if signal == 1:
buy_list.append(data['Close'][i])
sell_list.append(None)
signal = 0
elif signal == -1:
sell_list.append(data['Close'][i])
buy_list.append(None)
signal = 0
else:
buy_list.append(None)
sell_list.append(None)
if flag == 1 and data['EMA-12'][i]>data['EMA-26'][i] and data['EMA-120'][i]>data['SMA-120'][i]:
#buy
signal = 1
flag = -1
elif flag == -1 and data['EMA-12'][i]<data['EMA-26'][i]:
#sell
signal = -1
flag = 1
else:
signal = 0
return buy_list,sell_list
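# buy_sell implements a simple crossover rule: a buy is flagged when EMA-12
# crosses above EMA-26 while EMA-120 sits above SMA-120 (long-term trend
# filter), and a sell when EMA-12 drops back below EMA-26. Because `signal` is
# set on the current row and only acted on in the next loop iteration, the
# trade price is recorded one bar after the crossover. Hypothetical wiring
# (not part of the original script):
# data['Buy'], data['Sell'] = buy_sell(data)
# plotInfo(data)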
def main():
stockname = input("Enter stockname: ")
df = | pd.DataFrame() | pandas.DataFrame |
"""
This tests whether the Study object was created correctly.
No computation or visualization tests yet.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from six import iteritems
from collections import Iterable
import itertools
import json
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as pdt
import pytest
import semantic_version
##############################################################################
# FIXTURES
@pytest.fixture(params=['expression', 'splicing'])
def data_type(request):
"""data_type fixture"""
return request.param
@pytest.fixture(params=[None, 'subset1'],
ids=['color_samples_by_none', 'color_samples_by_subset1'])
def color_samples_by(request, metadata_phenotype_col):
"""color_samples_by fixture"""
if request.param == 'phenotype':
return metadata_phenotype_col
else:
return request.param
class TestStudy(object):
# @pytest.fixture
# def n_groups(self):
# return 3
##########################################################################
@pytest.fixture
def study(self,
metadata_data, metadata_kws,
mapping_stats_data, mapping_stats_kws,
expression_data, expression_kws,
splicing_data, splicing_kws,
gene_ontology_data):
"""study fixture"""
from flotilla import Study
kwargs = {}
metadata = metadata_data.copy()
splicing = splicing_data.copy()
expression = expression_data.copy()
mapping_stats = mapping_stats_data.copy()
gene_ontology = gene_ontology_data.copy()
kw_pairs = (('metadata', metadata_kws),
('mapping_stats', mapping_stats_kws),
('expression', expression_kws),
('splicing', splicing_kws))
for data_type, kws in kw_pairs:
for kw_name, kw_value in iteritems(kws):
kwargs['{}_{}'.format(data_type, kw_name)] = kw_value
return Study(metadata,
mapping_stats_data=mapping_stats,
expression_data=expression,
splicing_data=splicing,
gene_ontology_data=gene_ontology,
**kwargs)
def test_init(self, metadata_data):
from flotilla import Study
metadata = metadata_data.copy()
study = Study(metadata)
metadata['outlier'] = False
true_default_sample_subsets = list(sorted(list(set(
study.metadata.sample_subsets.keys()).difference(
set(study.default_sample_subset)))))
true_default_sample_subsets.insert(0, study.default_sample_subset)
pdt.assert_frame_equal(study.metadata.data, metadata)
pdt.assert_equal(study.version, '0.1.0')
pdt.assert_equal(study.pooled, None)
pdt.assert_equal(study.technical_outliers, None)
pdt.assert_equal(study.phenotype_col, study.metadata.phenotype_col)
pdt.assert_equal(study.phenotype_order, study.metadata.phenotype_order)
pdt.assert_equal(study.phenotype_to_color,
study.metadata.phenotype_to_color)
pdt.assert_equal(study.phenotype_to_marker,
study.metadata.phenotype_to_marker)
pdt.assert_series_equal(study.sample_id_to_phenotype,
study.metadata.sample_id_to_phenotype)
pdt.assert_series_equal(study.sample_id_to_color,
study.metadata.sample_id_to_color)
pdt.assert_numpy_array_equal(study.phenotype_transitions,
study.metadata.phenotype_transitions)
pdt.assert_numpy_array_equal(study.phenotype_color_ordered,
study.metadata.phenotype_color_order)
| pdt.assert_equal(study.default_sample_subset, 'all_samples') | pandas.util.testing.assert_equal |
import pandas as pd
import numpy as np
import jinja2
import math
import re
class Plate_Desc:
def __init__(self, title, descrip, serialnum, date_time, datafname):
self.title = title
self.descrip = descrip
self.serialnum = serialnum
self.date_time = date_time
self.datafname = datafname
def clear(self):
self.title = ""
self.descrip = ""
self.serialnum = ""
self.date_time = ""
self.datafname = ""
return self
def new(self):
self.__init__("", "", "", "", "" )
return self
# open the file
datafname = "Z:\Shared Folders\Data_Per\Prog\Proj-HaasMeas\Large_Top_755_772.out"
datafname_short = re.sub(r'\\.+\\', '', datafname)
datafname_short = re.sub(r'^(.*:)', '', datafname_short)
fin = open(datafname,"r")
rdplate_line = 0 #reading aline on a plate
plate_num = 0
plate_desc = Plate_Desc("","","","", datafname_short)
plate_hole_rec = [] # holds all the data for all the hole measurements on one plate
plate_hole_table = [] # all the holes for a single plate
plate_meas_table = [] # list of two dimension (plate desc + plate_holes_rec's)
hole_rec = []
hole_table = []
for line_in in fin:
line_out = ""
if line_in.find("%")>=0:
#nothing
line_out = ""
elif not line_in.strip():
#it was null
line_out = ""
elif line_in.find("()")>=0:
#it is the third line in
line_out = ""
else:
line_out=line_in.replace("\n","")
# anything but a blank line
if line_out != "":
if (rdplate_line==0):
if (line_out.find("HOLE ")>=0):
rdplate_line = 4 #there is another hole on the plate
else:
if plate_num ==0:
plate_num += 1
else:
#if not the first plate then must push to stack
plate_meas_rec = (plate_desc, plate_hole_table)
plate_meas_table.append(plate_meas_rec)
plate_desc = Plate_Desc("","","","", datafname_short)
plate_hole_table = []
plate_num += 1
# now, need to find out if a plate reading is in progress
if rdplate_line == 0:
#header
plate_desc = Plate_Desc("","","","", datafname_short)
rdplate_line = rdplate_line + 1
plate_desc.title = line_out.strip()
elif rdplate_line == 1:
#descrip #2
rdplate_line = rdplate_line + 1
plate_desc.descrip = line_out.strip()
elif rdplate_line == 2:
#serial number
if line_out.find("SERIAL")>= 0:
#it is serial number
plate_desc.serialnum = line_out.replace("SERIAL: ", "")
rdplate_line = rdplate_line + 1
elif rdplate_line == 3:
#time and date
tempstr = line_out.replace(" ", ",")
split_val_list = tempstr.split(",")
if len(split_val_list[1]) < 6:
split_val_list[1] = "0" + split_val_list[1]
date_str = split_val_list[0][2] + split_val_list[0][3] + "/" + split_val_list[0][4] + split_val_list[0][5] + "/" + "20" + split_val_list[0][0] + split_val_list[0][1]
time_str = split_val_list[1][0] + split_val_list[1][1] + ":" + split_val_list[1][2] + split_val_list[1][3] + ":" + split_val_list[1][4] + split_val_list[1][5]
plate_desc.date_time = date_str + " " + time_str
rdplate_line = rdplate_line + 1
elif rdplate_line == 4:
#hole number
if line_out.find("HOLE")>= 0:
                #it is the hole number
tempstr = line_out.replace("HOLE ", "")
plate_hole_rec = []
plate_hole_rec.append(tempstr)
rdplate_line = rdplate_line + 1
elif rdplate_line == 5:
#X pos
if line_out.find("X_TH")>= 0:
tempstr1 = line_out.replace("X_TH:", "")
tempstr2 = tempstr1.replace("X_MEA:", "")
tempstr3 = tempstr2.replace("X_DIF:", "")
tempstr4 = tempstr3.replace(" ", ",")
split_val_list = tempstr4.split(",")
plate_hole_rec.append(split_val_list[0])
plate_hole_rec.append(split_val_list[1])
plate_hole_rec.append(split_val_list[2])
rdplate_line = rdplate_line + 1
elif rdplate_line == 6:
#Y pos
if line_out.find("Y_TH")>= 0:
tempstr1 = line_out.replace("Y_TH:", "")
tempstr2 = tempstr1.replace("Y_MEA:", "")
tempstr3 = tempstr2.replace("Y_DIF:", "")
tempstr4 = tempstr3.replace(" ", ",")
split_val_list = tempstr4.split(",")
plate_hole_rec.append(split_val_list[0])
plate_hole_rec.append(split_val_list[1])
plate_hole_rec.append(split_val_list[2])
rdplate_line = rdplate_line + 1
elif rdplate_line == 7:
            #Z pos
if line_out.find("Z_TH")>= 0:
tempstr1 = line_out.replace("Z_TH:", "")
tempstr2 = tempstr1.replace("Z_MEA:", "")
tempstr3 = tempstr2.replace("Z_DIF:", "")
tempstr4 = tempstr3.replace(" ", ",")
split_val_list = tempstr4.split(",")
plate_hole_rec.append(split_val_list[0])
plate_hole_rec.append(split_val_list[1])
plate_hole_rec.append(split_val_list[2])
rdplate_line = rdplate_line + 1
elif rdplate_line == 8:
#DIAM
if line_out.find("DIAM")>= 0:
tempstr1 = line_out.replace("DIAM:", "")
tempstr2 = tempstr1.replace("DIA_ERR:", "")
tempstr3 = tempstr2.replace(" ", ",")
split_val_list = tempstr3.split(",")
plate_hole_rec.append(split_val_list[0])
plate_hole_rec.append(split_val_list[1])
# last number read. next line blank but will be chopped up on top
plate_hole_table.append(plate_hole_rec)
rdplate_line = 0
else:
print(plate_desc)
            print(plate_hole_rec)
rdplate_line = 0
#print (line_out)
# lines all read in, store the last record in memory
plate_meas_rec = (plate_desc, plate_hole_table)
plate_meas_table.append(plate_meas_rec)
num_holes = len(plate_meas_table[0][1])
num_plates = len(plate_meas_table)
# summary at the top
col1 = pd.Index(['plates', 'descrip', 'data file', 'start', 'stop', 'operator', '# plates', 'start s/n', 'stop s/n','# holes'])
col2 = pd.Index([plate_meas_table[0][0].title, plate_meas_table[0][0].descrip, plate_meas_table[0][0].datafname, plate_meas_table[0][0].date_time, plate_meas_table[len(plate_meas_table)-1][0].date_time, '<NAME>', str(len(plate_meas_table)).strip(), plate_meas_table[0][0].serialnum, plate_meas_table[len(plate_meas_table)-1][0].serialnum, str(num_holes).strip() ])
df_head = pd.DataFrame(col2, columns=[''], index=col1)
print(df_head)
def color_negative_red(val):
#color = 'red' if val < 0 else 'black'
color = 'black'
return f'color: {color}'
def color_spec_dif_01(val):
color = 'red' if float(val) > 0.001 else 'black'
return f'color: {color}'
def color_spec_dif_02(val): #warning if above 0.002
color = 'blue' if float(val) > 0.002 or float(val) < -0.002 else 'black'
return f'color: {color}'
def color_spec_dif_03(val): #red if above 0.002
color = 'red' if float(val) > 0.002 or float(val) < -0.002 else 'black'
return f'color: {color}'
def color_spec_dia(val):
color = 'red' if float(val) > 0.4910 or float(val) < 0.4900 else 'black'
return f'color: {color}'
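# The helpers above are pandas Styler callbacks: each receives a single cell
# value and returns a CSS 'color: ...' string. They are attached per element
# via DataFrame.style.applymap(...) below, e.g. flagging hole diameters that
# fall outside the 0.4900-0.4910 tolerance band in red.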
head_styler = df_head.style.applymap(color_negative_red)
#create serial numbers
meas_tab_serial_num = pd.Index(['spec'], dtype='object')
meas_tab_num = pd.Index([' '])
i=0
for lp in plate_meas_table:
meas_tab_serial_num = meas_tab_serial_num.append(pd.Index([plate_meas_table[i][0].serialnum]))
i += 1
meas_tab_num = meas_tab_num.append(pd.Index([i]))
# GO THRU THE PLATES NOW
#create the dataframe first so that can append without creating a complex array and eating up memory
df_table_01a = pd.DataFrame( meas_tab_serial_num, columns=['PLATE'], index=meas_tab_num )
df_table_01b = pd.DataFrame( [], columns=[], index=[])
df_table_01c = pd.DataFrame( [], columns=[], index=[])
df_table_01d = pd.DataFrame( [], columns=[], index=[])
curr_table_num = 1
#loop thru all of the holes (outside loop for columns)
for i in range(num_holes):
Xcol=pd.Index([plate_meas_table[0][1][i][1]]) # X spec
Ycol=pd.Index([plate_meas_table[0][1][i][4]]) # Y spec
for j in range(num_plates):
Xcol = Xcol.append(pd.Index([plate_meas_table[j][1][i][2]] ))
Ycol = Ycol.append(pd.Index([plate_meas_table[j][1][i][5]] ))
# all plates read append to dataframe
if curr_table_num == 1:
df_table_01a['X'+str(i+1)] = Xcol
df_table_01a['Y'+str(i+1)] = Ycol
if i>=4 :
curr_table_num += 1
df_table_01b = pd.DataFrame( meas_tab_serial_num, columns=['PLATE'], index=meas_tab_num )
elif curr_table_num == 2:
df_table_01b['X'+str(i+1)] = Xcol
df_table_01b['Y'+str(i+1)] = Ycol
if i>=9 :
curr_table_num += 1
df_table_01c = pd.DataFrame( meas_tab_serial_num, columns=['PLATE'], index=meas_tab_num )
elif curr_table_num == 3:
df_table_01c['X'+str(i+1)] = Xcol
df_table_01c['Y'+str(i+1)] = Ycol
if i>=14 :
curr_table_num += 1
df_table_01d = pd.DataFrame( meas_tab_serial_num, columns=['PLATE'], index=meas_tab_num )
elif curr_table_num == 4:
df_table_01d['X'+str(i+1)] = Xcol
df_table_01d['Y'+str(i+1)] = Ycol
meas_01a_styler = df_table_01a.style.applymap(color_negative_red)
meas_01b_styler = df_table_01b.style.applymap(color_negative_red)
meas_01c_styler = df_table_01c.style.applymap(color_negative_red)
meas_01d_styler = df_table_01d.style.applymap(color_negative_red)
# for the diameter and Z positions
#create the dataframe first so that can append without creating a complex array and eating up memory
df_table_02a = pd.DataFrame( meas_tab_serial_num, columns=['PLATE'], index=meas_tab_num )
df_table_02b = pd.DataFrame( [], columns=[], index=[])
df_table_02c = pd.DataFrame( [], columns=[], index=[])
df_table_02d = pd.DataFrame( [], columns=[], index=[])
hole_dia_table = | pd.Index([]) | pandas.Index |
import numpy as np
import pandas as pd
import os, errno
import datetime
import uuid
import itertools
import yaml
import subprocess
import scipy.sparse as sp
from scipy.spatial.distance import squareform
from sklearn.decomposition.nmf import non_negative_factorization
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import matplotlib.pyplot as plt
import scanpy as sc
def save_df_to_npz(obj, filename):
np.savez_compressed(filename, data=obj.values, index=obj.index.values, columns=obj.columns.values)
def save_df_to_text(obj, filename):
obj.to_csv(filename, sep='\t')
def load_df_from_npz(filename):
with np.load(filename, allow_pickle=True) as f:
obj = pd.DataFrame(**f)
return obj
def check_dir_exists(path):
"""
Checks if directory already exists or not and creates it if it doesn't
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def worker_filter(iterable, worker_index, total_workers):
return (p for i,p in enumerate(iterable) if (i-worker_index)%total_workers==0)
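# Illustrative sketch: jobs are dealt out round-robin, so with 10 jobs split
# over 3 workers, worker 0 handles jobs 0, 3, 6, 9 and worker 1 handles 1, 4, 7.
# >>> list(worker_filter(range(10), 1, 3))
# [1, 4, 7]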
def fast_euclidean(mat):
D = mat.dot(mat.T)
squared_norms = np.diag(D).copy()
D *= -2.0
D += squared_norms.reshape((-1,1))
D += squared_norms.reshape((1,-1))
D = np.sqrt(D)
D[D < 0] = 0
return squareform(D, checks=False)
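# fast_euclidean expands ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b from the Gram
# matrix mat @ mat.T, so only one dense matrix product is needed; tiny negative
# values caused by round-off are clipped to zero before the square root, and
# the result is returned in condensed (squareform) order.
# Minimal check (illustrative):
# >>> fast_euclidean(np.array([[0., 0.], [3., 4.]]))
# array([5.])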
def fast_ols_all_cols(X, Y):
pinv = np.linalg.pinv(X)
beta = np.dot(pinv, Y)
return(beta)
def fast_ols_all_cols_df(X,Y):
beta = fast_ols_all_cols(X, Y)
beta = pd.DataFrame(beta, index=X.columns, columns=Y.columns)
return(beta)
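# These two helpers solve the ordinary least-squares problem min ||X @ beta - Y||
# for every column of Y at once via the Moore-Penrose pseudoinverse,
# beta = pinv(X) @ Y; `consensus` below uses this to regress z-scored TPM values
# on the refit usage matrix when computing the gene_spectra_score output.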
def var_sparse_matrix(X):
mean = np.array(X.mean(axis=0)).reshape(-1)
Xcopy = X.copy()
Xcopy.data **= 2
var = np.array(Xcopy.mean(axis=0)).reshape(-1) - (mean**2)
return(var)
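# var_sparse_matrix computes per-column variances without densifying X by using
# Var[X] = E[X^2] - E[X]^2; squaring only the stored .data entries is enough
# because the implicit zeros contribute nothing to either moment.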
def get_highvar_genes_sparse(expression, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_mean = np.array(expression.mean(axis=0)).astype(float).reshape(-1)
E2 = expression.copy(); E2.data **= 2; gene2_mean = np.array(E2.mean(axis=0)).reshape(-1)
gene_var = pd.Series(gene2_mean - (gene_mean**2))
del(E2)
gene_mean = pd.Series(gene_mean)
gene_fano = gene_var / gene_mean
# Find parameters for expected fano line
top_genes = gene_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_var)/gene_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_fano.quantile([0.10, 0.90])
winsor_box = ((gene_fano > w_fano_low) &
(gene_fano < w_fano_high) &
(gene_mean > w_mean_low) &
(gene_mean < w_mean_high))
fano_median = gene_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A**2)*gene_mean + (B**2)
fano_ratio = (gene_fano/gene_expected_fano)
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T=None
else:
if not expected_fano_threshold:
            T = (1. + gene_fano[winsor_box].std())
else:
T = expected_fano_threshold
        high_var_genes_ind = (fano_ratio > T) & (gene_mean > minimal_mean)
gene_counts_stats = pd.DataFrame({
'mean': gene_mean,
'var': gene_var,
'fano': gene_fano,
'expected_fano': gene_expected_fano,
'high_var': high_var_genes_ind,
'fano_ratio': fano_ratio
})
gene_fano_parameters = {
'A': A, 'B': B, 'T':T, 'minimal_mean': minimal_mean,
}
return(gene_counts_stats, gene_fano_parameters)
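# Both overdispersion helpers fit an expected Fano factor as a function of the
# mean, expected_fano = A^2 * mean + B^2, with A estimated from the 20 most
# highly expressed genes and B from the median Fano factor of "winsorized"
# genes (those inside the 10th-90th percentile box of mean and Fano). Genes are
# then ranked by the ratio observed fano / expected_fano.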
def get_highvar_genes(input_counts, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_counts_mean = pd.Series(input_counts.mean(axis=0).astype(float))
gene_counts_var = pd.Series(input_counts.var(ddof=0, axis=0).astype(float))
gene_counts_fano = pd.Series(gene_counts_var/gene_counts_mean)
# Find parameters for expected fano line
top_genes = gene_counts_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_counts_var)/gene_counts_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_counts_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_counts_fano.quantile([0.10, 0.90])
winsor_box = ((gene_counts_fano > w_fano_low) &
(gene_counts_fano < w_fano_high) &
(gene_counts_mean > w_mean_low) &
(gene_counts_mean < w_mean_high))
fano_median = gene_counts_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A**2)*gene_counts_mean + (B**2)
fano_ratio = (gene_counts_fano/gene_expected_fano)
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T=None
else:
if not expected_fano_threshold:
T = (1. + gene_counts_fano[winsor_box].std())
else:
T = expected_fano_threshold
high_var_genes_ind = (fano_ratio > T) & (gene_counts_mean > minimal_mean)
gene_counts_stats = pd.DataFrame({
'mean': gene_counts_mean,
'var': gene_counts_var,
'fano': gene_counts_fano,
'expected_fano': gene_expected_fano,
'high_var': high_var_genes_ind,
'fano_ratio': fano_ratio
})
gene_fano_parameters = {
'A': A, 'B': B, 'T':T, 'minimal_mean': minimal_mean,
}
return(gene_counts_stats, gene_fano_parameters)
def compute_tpm(input_counts):
"""
Default TPM normalization
"""
tpm = input_counts.copy()
sc.pp.normalize_per_cell(tpm, counts_per_cell_after=1e6)
return(tpm)
class cNMF():
def __init__(self, output_dir=".", name=None):
"""
Parameters
----------
output_dir : path, optional (default=".")
Output directory for analysis files.
name : string, optional (default=None)
A name for this analysis. Will be prefixed to all output files.
If set to None, will be automatically generated from date (and random string).
"""
self.output_dir = output_dir
if name is None:
now = datetime.datetime.now()
rand_hash = uuid.uuid4().hex[:6]
name = '%s_%s' % (now.strftime("%Y_%m_%d"), rand_hash)
self.name = name
self.paths = None
def _initialize_dirs(self):
if self.paths is None:
# Check that output directory exists, create it if needed.
check_dir_exists(self.output_dir)
check_dir_exists(os.path.join(self.output_dir, self.name))
check_dir_exists(os.path.join(self.output_dir, self.name, 'cnmf_tmp'))
self.paths = {
'normalized_counts' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.norm_counts.h5ad'),
'nmf_replicate_parameters' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.nmf_params.df.npz'),
'nmf_run_parameters' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.nmf_idvrun_params.yaml'),
'nmf_genes_list' : os.path.join(self.output_dir, self.name, self.name+'.overdispersed_genes.txt'),
'tpm' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.tpm.h5ad'),
'tpm_stats' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.tpm_stats.df.npz'),
'iter_spectra' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.iter_%d.df.npz'),
'iter_usages' : os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.usages.k_%d.iter_%d.df.npz'),
'merged_spectra': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.merged.df.npz'),
'local_density_cache': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.local_density_cache.k_%d.merged.df.npz'),
'consensus_spectra': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.spectra.k_%d.dt_%s.consensus.df.npz'),
'consensus_spectra__txt': os.path.join(self.output_dir, self.name, self.name+'.spectra.k_%d.dt_%s.consensus.txt'),
'consensus_usages': os.path.join(self.output_dir, self.name, 'cnmf_tmp',self.name+'.usages.k_%d.dt_%s.consensus.df.npz'),
'consensus_usages__txt': os.path.join(self.output_dir, self.name, self.name+'.usages.k_%d.dt_%s.consensus.txt'),
'consensus_stats': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.stats.k_%d.dt_%s.df.npz'),
'clustering_plot': os.path.join(self.output_dir, self.name, self.name+'.clustering.k_%d.dt_%s.png'),
'gene_spectra_score': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.gene_spectra_score.k_%d.dt_%s.df.npz'),
'gene_spectra_score__txt': os.path.join(self.output_dir, self.name, self.name+'.gene_spectra_score.k_%d.dt_%s.txt'),
'gene_spectra_tpm': os.path.join(self.output_dir, self.name, 'cnmf_tmp', self.name+'.gene_spectra_tpm.k_%d.dt_%s.df.npz'),
'gene_spectra_tpm__txt': os.path.join(self.output_dir, self.name, self.name+'.gene_spectra_tpm.k_%d.dt_%s.txt'),
'k_selection_plot' : os.path.join(self.output_dir, self.name, self.name+'.k_selection.png'),
'k_selection_stats' : os.path.join(self.output_dir, self.name, self.name+'.k_selection_stats.df.npz'),
}
def get_norm_counts(self, counts, tpm,
high_variance_genes_filter = None,
num_highvar_genes = None
):
"""
Parameters
----------
counts : anndata.AnnData
Scanpy AnnData object (cells x genes) containing raw counts. Filtered such that
            there are no genes or cells with 0 counts
tpm : anndata.AnnData
Scanpy AnnData object (cells x genes) containing tpm normalized data matching
counts
high_variance_genes_filter : np.array, optional (default=None)
A pre-specified list of genes considered to be high-variance.
Only these genes will be used during factorization of the counts matrix.
Must match the .var index of counts and tpm.
If set to None, high-variance genes will be automatically computed, using the
parameters below.
num_highvar_genes : int, optional (default=None)
Instead of providing an array of high-variance genes, identify this many most overdispersed genes
for filtering
Returns
-------
normcounts : anndata.AnnData, shape (cells, num_highvar_genes)
            A counts matrix containing only the high variance genes and with
            columns (genes) normalized to unit variance
"""
if high_variance_genes_filter is None:
## Get list of high-var genes if one wasn't provided
if sp.issparse(tpm.X):
(gene_counts_stats, gene_fano_params) = get_highvar_genes_sparse(tpm.X, numgenes=num_highvar_genes)
else:
(gene_counts_stats, gene_fano_params) = get_highvar_genes(np.array(tpm.X), numgenes=num_highvar_genes)
high_variance_genes_filter = list(tpm.var.index[gene_counts_stats.high_var.values])
## Subset out high-variance genes
norm_counts = counts[:, high_variance_genes_filter]
## Scale genes to unit variance
if sp.issparse(tpm.X):
sc.pp.scale(norm_counts, zero_center=False)
if np.isnan(norm_counts.X.data).sum() > 0:
print('Warning NaNs in normalized counts matrix')
else:
norm_counts.X /= norm_counts.X.std(axis=0, ddof=1)
if np.isnan(norm_counts.X).sum().sum() > 0:
print('Warning NaNs in normalized counts matrix')
## Save a \n-delimited list of the high-variance genes used for factorization
open(self.paths['nmf_genes_list'], 'w').write('\n'.join(high_variance_genes_filter))
## Check for any cells that have 0 counts of the overdispersed genes
zerocells = norm_counts.X.sum(axis=1)==0
if zerocells.sum()>0:
examples = norm_counts.obs.index[zerocells]
print('Warning: %d cells have zero counts of overdispersed genes. E.g. %s' % (zerocells.sum(), examples[0]))
print('Consensus step may not run when this is the case')
return(norm_counts)
def save_norm_counts(self, norm_counts):
self._initialize_dirs()
sc.write(self.paths['normalized_counts'], norm_counts)
def get_nmf_iter_params(self, ks, n_iter = 100,
random_state_seed = None,
beta_loss = 'kullback-leibler'):
"""
Create a DataFrame with parameters for NMF iterations.
Parameters
----------
ks : integer, or list-like.
Number of topics (components) for factorization.
Several values can be specified at the same time, which will be run independently.
        n_iter : integer, optional (default=100)
Number of iterations for factorization. If several ``k`` are specified, this many
iterations will be run for each value of ``k``.
random_state_seed : int or None, optional (default=None)
Seed for sklearn random state.
"""
if type(ks) is int:
ks = [ks]
# Remove any repeated k values, and order.
k_list = sorted(set(list(ks)))
n_runs = len(ks)* n_iter
np.random.seed(seed=random_state_seed)
nmf_seeds = np.random.randint(low=1, high=(2**32)-1, size=n_runs)
replicate_params = []
for i, (k, r) in enumerate(itertools.product(k_list, range(n_iter))):
replicate_params.append([k, r, nmf_seeds[i]])
replicate_params = pd.DataFrame(replicate_params, columns = ['n_components', 'iter', 'nmf_seed'])
_nmf_kwargs = dict(
alpha=0.0,
l1_ratio=0.0,
beta_loss=beta_loss,
solver='mu',
tol=1e-4,
max_iter=400,
regularization=None,
init='random'
)
## Coordinate descent is faster than multiplicative update but only works for frobenius
if beta_loss == 'frobenius':
_nmf_kwargs['solver'] = 'cd'
return(replicate_params, _nmf_kwargs)
def save_nmf_iter_params(self, replicate_params, run_params):
self._initialize_dirs()
save_df_to_npz(replicate_params, self.paths['nmf_replicate_parameters'])
with open(self.paths['nmf_run_parameters'], 'w') as F:
yaml.dump(run_params, F)
def _nmf(self, X, nmf_kwargs):
"""
Parameters
----------
X : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
nmf_kwargs : dict,
Arguments to be passed to ``non_negative_factorization``
"""
(usages, spectra, niter) = non_negative_factorization(X, **nmf_kwargs)
return(spectra, usages)
def run_nmf(self,
worker_i=1, total_workers=1,
):
"""
Iteratively run NMF with prespecified parameters.
Use the `worker_i` and `total_workers` parameters for parallelization.
Generic kwargs for NMF are loaded from self.paths['nmf_run_parameters'], defaults below::
``non_negative_factorization`` default arguments:
alpha=0.0
l1_ratio=0.0
beta_loss='kullback-leibler'
solver='mu'
tol=1e-4,
max_iter=200
regularization=None
init='random'
random_state, n_components are both set by the prespecified self.paths['nmf_replicate_parameters'].
Parameters
----------
norm_counts : pandas.DataFrame,
Normalized counts dataFrame to be factorized.
(Output of ``normalize_counts``)
run_params : pandas.DataFrame,
Parameters for NMF iterations.
(Output of ``prepare_nmf_iter_params``)
"""
self._initialize_dirs()
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
norm_counts = sc.read(self.paths['normalized_counts'])
_nmf_kwargs = yaml.load(open(self.paths['nmf_run_parameters']), Loader=yaml.FullLoader)
jobs_for_this_worker = worker_filter(range(len(run_params)), worker_i, total_workers)
for idx in jobs_for_this_worker:
p = run_params.iloc[idx, :]
print('[Worker %d]. Starting task %d.' % (worker_i, idx))
_nmf_kwargs['random_state'] = p['nmf_seed']
_nmf_kwargs['n_components'] = p['n_components']
(spectra, usages) = self._nmf(norm_counts.X, _nmf_kwargs)
spectra = pd.DataFrame(spectra,
index=np.arange(1, _nmf_kwargs['n_components']+1),
columns=norm_counts.var.index)
save_df_to_npz(spectra, self.paths['iter_spectra'] % (p['n_components'], p['iter']))
def combine_nmf(self, k, remove_individual_iterations=False):
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
print('Combining factorizations for k=%d.'%k)
self._initialize_dirs()
combined_spectra = None
n_iter = sum(run_params.n_components==k)
run_params_subset = run_params[run_params.n_components==k].sort_values('iter')
spectra_labels = []
for i,p in run_params_subset.iterrows():
spectra = load_df_from_npz(self.paths['iter_spectra'] % (p['n_components'], p['iter']))
if combined_spectra is None:
combined_spectra = np.zeros((n_iter, k, spectra.shape[1]))
combined_spectra[p['iter'], :, :] = spectra.values
for t in range(k):
spectra_labels.append('iter%d_topic%d'%(p['iter'], t+1))
combined_spectra = combined_spectra.reshape(-1, combined_spectra.shape[-1])
combined_spectra = pd.DataFrame(combined_spectra, columns=spectra.columns, index=spectra_labels)
save_df_to_npz(combined_spectra, self.paths['merged_spectra']%k)
return combined_spectra
def consensus(self, k, density_threshold_str='0.5', local_neighborhood_size = 0.30,show_clustering = False,
skip_density_and_return_after_stats = False, close_clustergram_fig=True):
merged_spectra = load_df_from_npz(self.paths['merged_spectra']%k)
norm_counts = sc.read(self.paths['normalized_counts'])
if skip_density_and_return_after_stats:
density_threshold_str = '2'
density_threshold_repl = density_threshold_str.replace('.', '_')
density_threshold = float(density_threshold_str)
n_neighbors = int(local_neighborhood_size * merged_spectra.shape[0]/k)
# Rescale topics such to length of 1.
l2_spectra = (merged_spectra.T/np.sqrt((merged_spectra**2).sum(axis=1))).T
if not skip_density_and_return_after_stats:
# Compute the local density matrix (if not previously cached)
topics_dist = None
if os.path.isfile(self.paths['local_density_cache'] % k):
local_density = load_df_from_npz(self.paths['local_density_cache'] % k)
else:
# first find the full distance matrix
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# partition based on the first n neighbors
partitioning_order = np.argpartition(topics_dist, n_neighbors+1)[:, :n_neighbors+1]
# find the mean over those n_neighbors (excluding self, which has a distance of 0)
distance_to_nearest_neighbors = topics_dist[np.arange(topics_dist.shape[0])[:, None], partitioning_order]
local_density = pd.DataFrame(distance_to_nearest_neighbors.sum(1)/(n_neighbors),
columns=['local_density'],
index=l2_spectra.index)
save_df_to_npz(local_density, self.paths['local_density_cache'] % k)
del(partitioning_order)
del(distance_to_nearest_neighbors)
density_filter = local_density.iloc[:, 0] < density_threshold
l2_spectra = l2_spectra.loc[density_filter, :]
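            # Spectra whose mean distance to their n_neighbors nearest
            # neighbours exceeds density_threshold are treated as outlier
            # factorization runs and dropped before K-means clustering.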
kmeans_model = KMeans(n_clusters=k, n_init=10, random_state=1)
kmeans_model.fit(l2_spectra)
kmeans_cluster_labels = pd.Series(kmeans_model.labels_+1, index=l2_spectra.index)
# Find median usage for each gene across cluster
median_spectra = l2_spectra.groupby(kmeans_cluster_labels).median()
# Normalize median spectra to probability distributions.
median_spectra = (median_spectra.T/median_spectra.sum(1)).T
# Compute the silhouette score
stability = silhouette_score(l2_spectra.values, kmeans_cluster_labels, metric='euclidean')
# Obtain the reconstructed count matrix by re-fitting the usage matrix and computing the dot product: usage.dot(spectra)
refit_nmf_kwargs = yaml.load(open(self.paths['nmf_run_parameters']), Loader=yaml.FullLoader)
refit_nmf_kwargs.update(dict(
n_components = k,
H = median_spectra.values,
update_H = False
))
_, rf_usages = self._nmf(norm_counts.X,
nmf_kwargs=refit_nmf_kwargs)
rf_usages = pd.DataFrame(rf_usages, index=norm_counts.obs.index, columns=median_spectra.index)
rf_pred_norm_counts = rf_usages.dot(median_spectra)
# Compute prediction error as a frobenius norm
if sp.issparse(norm_counts.X):
prediction_error = ((norm_counts.X.todense() - rf_pred_norm_counts)**2).sum().sum()
else:
prediction_error = ((norm_counts.X - rf_pred_norm_counts)**2).sum().sum()
consensus_stats = pd.DataFrame([k, density_threshold, stability, prediction_error],
index = ['k', 'local_density_threshold', 'stability', 'prediction_error'],
columns = ['stats'])
if skip_density_and_return_after_stats:
return consensus_stats
save_df_to_npz(median_spectra, self.paths['consensus_spectra']%(k, density_threshold_repl))
save_df_to_npz(rf_usages, self.paths['consensus_usages']%(k, density_threshold_repl))
save_df_to_npz(consensus_stats, self.paths['consensus_stats']%(k, density_threshold_repl))
save_df_to_text(median_spectra, self.paths['consensus_spectra__txt']%(k, density_threshold_repl))
save_df_to_text(rf_usages, self.paths['consensus_usages__txt']%(k, density_threshold_repl))
# Compute gene-scores for each GEP by regressing usage on Z-scores of TPM
tpm = sc.read(self.paths['tpm'])
tpm_stats = load_df_from_npz(self.paths['tpm_stats'])
if sp.issparse(tpm.X):
norm_tpm = (np.array(tpm.X.todense()) - tpm_stats['__mean'].values) / tpm_stats['__std'].values
else:
norm_tpm = (tpm.X - tpm_stats['__mean'].values) / tpm_stats['__std'].values
usage_coef = fast_ols_all_cols(rf_usages.values, norm_tpm)
usage_coef = pd.DataFrame(usage_coef, index=rf_usages.columns, columns=tpm.var.index)
save_df_to_npz(usage_coef, self.paths['gene_spectra_score']%(k, density_threshold_repl))
save_df_to_text(usage_coef, self.paths['gene_spectra_score__txt']%(k, density_threshold_repl))
# Convert spectra to TPM units, and obtain results for all genes by running last step of NMF
# with usages fixed and TPM as the input matrix
norm_usages = rf_usages.div(rf_usages.sum(axis=1), axis=0)
refit_nmf_kwargs.update(dict(
H = norm_usages.T.values,
))
_, spectra_tpm = self._nmf(tpm.X.T, nmf_kwargs=refit_nmf_kwargs)
spectra_tpm = pd.DataFrame(spectra_tpm.T, index=rf_usages.columns, columns=tpm.var.index)
save_df_to_npz(spectra_tpm, self.paths['gene_spectra_tpm']%(k, density_threshold_repl))
save_df_to_text(spectra_tpm, self.paths['gene_spectra_tpm__txt']%(k, density_threshold_repl))
if show_clustering:
if topics_dist is None:
topics_dist = squareform(fast_euclidean(l2_spectra.values))
# (l2_spectra was already filtered using the density filter)
else:
# (but the previously computed topics_dist was not!)
topics_dist = topics_dist[density_filter.values, :][:, density_filter.values]
spectra_order = []
for cl in sorted(set(kmeans_cluster_labels)):
cl_filter = kmeans_cluster_labels==cl
if cl_filter.sum() > 1:
cl_dist = squareform(topics_dist[cl_filter, :][:, cl_filter])
cl_dist[cl_dist < 0] = 0 #Rarely get floating point arithmetic issues
cl_link = linkage(cl_dist, 'average')
cl_leaves_order = leaves_list(cl_link)
spectra_order += list(np.where(cl_filter)[0][cl_leaves_order])
else:
## Corner case where a component only has one element
spectra_order += list(np.where(cl_filter)[0])
from matplotlib import gridspec
import matplotlib.pyplot as plt
width_ratios = [0.5, 9, 0.5, 4, 1]
height_ratios = [0.5, 9]
fig = plt.figure(figsize=(sum(width_ratios), sum(height_ratios)))
gs = gridspec.GridSpec(len(height_ratios), len(width_ratios), fig,
0.01, 0.01, 0.98, 0.98,
height_ratios=height_ratios,
width_ratios=width_ratios,
wspace=0, hspace=0)
dist_ax = fig.add_subplot(gs[1,1], xscale='linear', yscale='linear',
xticks=[], yticks=[],xlabel='', ylabel='',
frameon=True)
D = topics_dist[spectra_order, :][:, spectra_order]
dist_im = dist_ax.imshow(D, interpolation='none', cmap='viridis', aspect='auto',
rasterized=True)
left_ax = fig.add_subplot(gs[1,0], xscale='linear', yscale='linear', xticks=[], yticks=[],
xlabel='', ylabel='', frameon=True)
left_ax.imshow(kmeans_cluster_labels.values[spectra_order].reshape(-1, 1),
interpolation='none', cmap='Spectral', aspect='auto',
rasterized=True)
top_ax = fig.add_subplot(gs[0,1], xscale='linear', yscale='linear', xticks=[], yticks=[],
xlabel='', ylabel='', frameon=True)
top_ax.imshow(kmeans_cluster_labels.values[spectra_order].reshape(1, -1),
interpolation='none', cmap='Spectral', aspect='auto',
rasterized=True)
hist_gs = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[1, 3],
wspace=0, hspace=0)
hist_ax = fig.add_subplot(hist_gs[0,0], xscale='linear', yscale='linear',
xlabel='', ylabel='', frameon=True, title='Local density histogram')
hist_ax.hist(local_density.values, bins=np.linspace(0, 1, 50))
hist_ax.yaxis.tick_right()
xlim = hist_ax.get_xlim()
ylim = hist_ax.get_ylim()
if density_threshold < xlim[1]:
hist_ax.axvline(density_threshold, linestyle='--', color='k')
hist_ax.text(density_threshold + 0.02, ylim[1] * 0.95, 'filtering\nthreshold\n\n', va='top')
hist_ax.set_xlim(xlim)
hist_ax.set_xlabel('Mean distance to k nearest neighbors\n\n%d/%d (%.0f%%) spectra above threshold\nwere removed prior to clustering'%(sum(~density_filter), len(density_filter), 100*(~density_filter).mean()))
fig.savefig(self.paths['clustering_plot']%(k, density_threshold_repl), dpi=250)
if close_clustergram_fig:
plt.close(fig)
def k_selection_plot(self, close_fig=True):
'''
Borrowed from <NAME>. 2013 Deciphering Mutational Signatures
publication in Cell Reports
'''
run_params = load_df_from_npz(self.paths['nmf_replicate_parameters'])
stats = []
for k in sorted(set(run_params.n_components)):
stats.append(self.consensus(k, skip_density_and_return_after_stats=True).stats)
stats = pd.DataFrame(stats)
stats.reset_index(drop = True, inplace = True)
save_df_to_npz(stats, self.paths['k_selection_stats'])
fig = plt.figure(figsize=(6, 4))
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(stats.k, stats.stability, 'o-', color='b')
ax1.set_ylabel('Stability', color='b', fontsize=15)
for tl in ax1.get_yticklabels():
tl.set_color('b')
#ax1.set_xlabel('K', fontsize=15)
ax2.plot(stats.k, stats.prediction_error, 'o-', color='r')
ax2.set_ylabel('Error', color='r', fontsize=15)
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax1.set_xlabel('Number of Components', fontsize=15)
ax1.grid('on')
plt.tight_layout()
fig.savefig(self.paths['k_selection_plot'], dpi=250)
if close_fig:
plt.close(fig)
if __name__=="__main__":
"""
Example commands for now:
output_dir="/Users/averes/Projects/Melton/Notebooks/2018/07-2018/cnmf_test/"
python cnmf.py prepare --output-dir $output_dir \
--name test --counts /Users/averes/Projects/Melton/Notebooks/2018/07-2018/cnmf_test/test_data.df.npz \
-k 6 7 8 9 --n-iter 5
python cnmf.py factorize --name test --output-dir $output_dir
    This can be parallelized as follows:
python cnmf.py factorize --name test --output-dir $output_dir --total-workers 2 --worker-index WORKER_INDEX (where worker_index starts with 0)
python cnmf.py combine --name test --output-dir $output_dir
python cnmf.py consensus --name test --output-dir $output_dir
"""
import sys, argparse
parser = argparse.ArgumentParser()
parser.add_argument('command', type=str, choices=['prepare', 'factorize', 'combine', 'consensus', 'k_selection_plot'])
parser.add_argument('--name', type=str, help='[all] Name for analysis. All output will be placed in [output-dir]/[name]/...', nargs='?', default='cNMF')
parser.add_argument('--output-dir', type=str, help='[all] Output directory. All output will be placed in [output-dir]/[name]/...', nargs='?', default='.')
parser.add_argument('-c', '--counts', type=str, help='[prepare] Input (cell x gene) counts matrix as df.npz or tab delimited text file')
    parser.add_argument('-k', '--components', type=int, help='[prepare] Number of components (k) for matrix factorization. Several can be specified with "-k 8 9 10"', nargs='+')
    parser.add_argument('-n', '--n-iter', type=int, help='[prepare] Number of factorization replicates', default=100)
parser.add_argument('--total-workers', type=int, help='[all] Total number of workers to distribute jobs to', default=1)
parser.add_argument('--seed', type=int, help='[prepare] Seed for pseudorandom number generation', default=None)
parser.add_argument('--genes-file', type=str, help='[prepare] File containing a list of genes to include, one gene per line. Must match column labels of counts matrix.', default=None)
parser.add_argument('--numgenes', type=int, help='[prepare] Number of high variance genes to use for matrix factorization.', default=2000)
parser.add_argument('--tpm', type=str, help='[prepare] Pre-computed (cell x gene) TPM values as df.npz or tab separated txt file. If not provided TPM will be calculated automatically', default=None)
parser.add_argument('--beta-loss', type=str, choices=['frobenius', 'kullback-leibler', 'itakura-saito'], help='[prepare] Loss function for NMF.', default='frobenius')
parser.add_argument('--densify', dest='densify', help='[prepare] Treat the input data as non-sparse', action='store_true', default=False)
parser.add_argument('--worker-index', type=int, help='[factorize] Index of current worker (the first worker should have index 0)', default=0)
parser.add_argument('--local-density-threshold', type=str, help='[consensus] Threshold for the local density filtering. This string must convert to a float >0 and <=2', default='0.5')
parser.add_argument('--local-neighborhood-size', type=float, help='[consensus] Fraction of the number of replicates to use as nearest neighbors for local density filtering', default=0.30)
parser.add_argument('--show-clustering', dest='show_clustering', help='[consensus] Produce a clustergram figure summarizing the spectra clustering', action='store_true')
args = parser.parse_args()
cnmf_obj = cNMF(output_dir=args.output_dir, name=args.name)
cnmf_obj._initialize_dirs()
if args.command == 'prepare':
if args.counts.endswith('.h5ad'):
input_counts = sc.read(args.counts)
else:
## Load txt or compressed dataframe and convert to scanpy object
if args.counts.endswith('.npz'):
input_counts = load_df_from_npz(args.counts)
else:
input_counts = pd.read_csv(args.counts, sep='\t', index_col=0)
if args.densify:
input_counts = sc.AnnData(X=input_counts.values,
obs=pd.DataFrame(index=input_counts.index),
var=pd.DataFrame(index=input_counts.columns))
else:
input_counts = sc.AnnData(X=sp.csr_matrix(input_counts.values),
obs=pd.DataFrame(index=input_counts.index),
var=pd.DataFrame(index=input_counts.columns))
if sp.issparse(input_counts.X) & args.densify:
input_counts.X = np.array(input_counts.X.todense())
if args.tpm is None:
tpm = compute_tpm(input_counts)
sc.write(cnmf_obj.paths['tpm'], tpm)
elif args.tpm.endswith('.h5ad'):
subprocess.call('cp %s %s' % (args.tpm, cnmf_obj.paths['tpm']), shell=True)
tpm = sc.read(cnmf_obj.paths['tpm'])
else:
if args.tpm.endswith('.npz'):
tpm = load_df_from_npz(args.tpm)
else:
tpm = pd.read_csv(args.tpm, sep='\t', index_col=0)
if args.densify:
tpm = sc.AnnData(X=tpm.values,
obs= | pd.DataFrame(index=tpm.index) | pandas.DataFrame |
import pandas as pd
import datetime
import numpy as np
from tpau_gtfsutilities.gtfs.gtfssingleton import gtfs as gtfs_singleton
from tpau_gtfsutilities.helpers.datetimehelpers import seconds_since_zero
from tpau_gtfsutilities.helpers.datetimehelpers import seconds_to_military
def get_trip_duration_seconds(gtfs_override=None, trip_bounds=None):
# returns trip duration series 'duration_seconds'
gtfs = gtfs_override if gtfs_override else gtfs_singleton
trip_bounds = trip_bounds if trip_bounds is not None else get_trip_bounds(gtfs_override=gtfs)
trip_durations_df = trip_bounds.assign( \
duration_seconds = \
trip_bounds['end_time'].transform(seconds_since_zero) \
- trip_bounds['start_time'].transform(seconds_since_zero) \
)
return trip_durations_df['duration_seconds']
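# A minimal sketch of how the two helpers in this module fit together
# (illustrative only; it assumes a feed has already been loaded into the
# gtfs singleton and this function is not called anywhere in the module):
def _example_trip_duration_usage():
    bounds = get_trip_bounds()  # DataFrame indexed by trip_id with start_time/end_time
    durations = get_trip_duration_seconds(trip_bounds=bounds)  # Series 'duration_seconds'
    return bounds.join(durations)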
def get_trip_bounds(gtfs_override=None, original=False):
# returns trip bounds dataframe
# index: trip_id
# columns: start_time, end_time
gtfs = gtfs_override if gtfs_override else gtfs_singleton
stop_times = gtfs.get_table('stop_times', original=original)
def min_military_arrival_time(grouped):
trip_id = grouped.name
grouped_df = grouped.to_frame()
grouped_df = grouped_df[grouped_df[trip_id] != ''] \
.assign(seconds_since_zero = lambda df: df[trip_id].transform(lambda t: seconds_since_zero(t)))
idx_of_min = grouped_df['seconds_since_zero'].idxmin(axis=0)
return grouped_df.loc[idx_of_min, trip_id]
def max_military_arrival_time(grouped):
trip_id = grouped.name
grouped_df = grouped.to_frame()
grouped_df = grouped_df[grouped_df[trip_id] != ''] \
.assign(seconds_since_zero = lambda df: df[trip_id].transform(lambda t: seconds_since_zero(t)))
idx_of_max = grouped_df['seconds_since_zero'].idxmax(axis=0)
return grouped_df.loc[idx_of_max, trip_id]
grouped_arrival_times = stop_times[stop_times['arrival_time'].notnull()].groupby('trip_id')['arrival_time']
min_arrival_times = grouped_arrival_times \
.agg(min_military_arrival_time) \
.rename('start_time')
max_arrival_times = grouped_arrival_times \
.agg(max_military_arrival_time) \
.rename('end_time')
return | pd.concat([min_arrival_times, max_arrival_times], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from multiprocess import Pool
import simplejson as json
import six
import sys
from cytoolz import compose
import numpy as np
import pandas as pd
import h5py
from ._util import parse_bins, parse_kv_list_param, parse_field_param
from . import cli, get_logger
import click
from ..create import (
create_cooler,
sanitize_records, aggregate_records,
TabixAggregator, HDF5Aggregator, PairixAggregator,
)
_pandas_version = | pd.__version__.split('.') | pandas.__version__.split |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/Python-2.7.11
# Authors of this page : <NAME> & <NAME>
from Tools import connect
import pandas as pandas
from Tools import UserLogs
# Return one or several phenotypes significantly associated with the given input value
def getSignigicantPhenotypes(type, value):
phenotypes = | pandas.DataFrame() | pandas.DataFrame |
# Author: <NAME> <<EMAIL>>
#
# License: Apache Software License 2.0
"""Tests for Drift package."""
import numpy as np
import pandas as pd
import plotly.graph_objects
import pytest
from sklearn.impute import SimpleImputer
from nannyml.chunk import CountBasedChunker, DefaultChunker, PeriodBasedChunker, SizeBasedChunker
from nannyml.drift import DriftCalculator
from nannyml.drift.base import DriftResult
from nannyml.drift.model_inputs.multivariate.data_reconstruction.calculator import (
DataReconstructionDriftCalculator,
_minimum_chunk_size,
)
from nannyml.drift.model_inputs.univariate.statistical.calculator import UnivariateStatisticalDriftCalculator
from nannyml.exceptions import InvalidArgumentsException, MissingMetadataException
from nannyml.metadata import extract_metadata
from nannyml.metadata.base import NML_METADATA_COLUMNS, FeatureType
from nannyml.preprocessing import preprocess
@pytest.fixture
def sample_drift_data() -> pd.DataFrame: # noqa: D103
data = pd.DataFrame(pd.date_range(start='1/6/2020', freq='10min', periods=20 * 1008), columns=['timestamp'])
data['week'] = data.timestamp.dt.isocalendar().week - 1
data['partition'] = 'reference'
data.loc[data.week >= 11, ['partition']] = 'analysis'
# data[NML_METADATA_PARTITION_COLUMN_NAME] = data['partition'] # simulate preprocessing
np.random.seed(167)
data['f1'] = np.random.randn(data.shape[0])
data['f2'] = np.random.rand(data.shape[0])
data['f3'] = np.random.randint(4, size=data.shape[0])
data['f4'] = np.random.randint(20, size=data.shape[0])
data['y_pred_proba'] = np.random.rand(data.shape[0])
data['output'] = np.random.randint(2, size=data.shape[0])
data['actual'] = np.random.randint(2, size=data.shape[0])
# Rule 1b is the shifted feature, 75% 0 instead of 50%
rule1a = {2: 0, 3: 1}
rule1b = {2: 0, 3: 0}
data.loc[data.week < 16, ['f3']] = data.loc[data.week < 16, ['f3']].replace(rule1a)
data.loc[data.week >= 16, ['f3']] = data.loc[data.week >= 16, ['f3']].replace(rule1b)
# Rule 2b is the shifted feature
c1 = 'white'
c2 = 'red'
c3 = 'green'
c4 = 'blue'
rule2a = {
0: c1,
1: c1,
2: c1,
3: c1,
4: c1,
5: c2,
6: c2,
7: c2,
8: c2,
9: c2,
10: c3,
11: c3,
12: c3,
13: c3,
14: c3,
15: c4,
16: c4,
17: c4,
18: c4,
19: c4,
}
rule2b = {
0: c1,
1: c1,
2: c1,
3: c1,
4: c1,
5: c2,
6: c2,
7: c2,
8: c2,
9: c2,
10: c3,
11: c3,
12: c3,
13: c1,
14: c1,
15: c4,
16: c4,
17: c4,
18: c1,
19: c2,
}
data.loc[data.week < 16, ['f4']] = data.loc[data.week < 16, ['f4']].replace(rule2a)
data.loc[data.week >= 16, ['f4']] = data.loc[data.week >= 16, ['f4']].replace(rule2b)
data.loc[data.week >= 16, ['f1']] = data.loc[data.week >= 16, ['f1']] + 0.6
data.loc[data.week >= 16, ['f2']] = np.sqrt(data.loc[data.week >= 16, ['f2']])
data.drop(columns=['week'], inplace=True)
return data
@pytest.fixture
def sample_drift_data_with_nans(sample_drift_data) -> pd.DataFrame: # noqa: D103
data = sample_drift_data.copy(deep=True)
data['id'] = data.index
nan_pick1 = set(data.id.sample(frac=0.11, random_state=13))
nan_pick2 = set(data.id.sample(frac=0.11, random_state=14))
data.loc[data.id.isin(nan_pick1), 'f1'] = np.NaN
data.loc[data.id.isin(nan_pick2), 'f4'] = np.NaN
data.drop(columns=['id'], inplace=True)
return data
@pytest.fixture
def sample_drift_metadata(sample_drift_data): # noqa: D103
return extract_metadata(sample_drift_data, model_name='model', model_type='classification_binary')
class SimpleDriftResult(DriftResult):
"""Dummy DriftResult implementation."""
def plot(self, *args, **kwargs) -> plotly.graph_objects.Figure:
"""Fake plot."""
pass
class SimpleDriftCalculator(DriftCalculator):
"""Dummy DriftCalculator implementation that returns a DataFrame with the selected feature columns, no rows."""
def fit(self, reference_data: pd.DataFrame) -> DriftCalculator: # noqa: D102
_ = preprocess(reference_data, self.model_metadata, reference=True)
return self
def calculate( # noqa: D102
self,
data: pd.DataFrame,
) -> SimpleDriftResult:
data = preprocess(data, self.model_metadata)
features_and_metadata = NML_METADATA_COLUMNS + self.selected_features
chunks = self.chunker.split(data, columns=features_and_metadata, minimum_chunk_size=500)
df = chunks[0].data.drop(columns=NML_METADATA_COLUMNS)
return SimpleDriftResult(
analysis_data=chunks, drift_data=pd.DataFrame(columns=df.columns), model_metadata=self.model_metadata
)
def test_base_drift_calculator_given_empty_reference_data_should_raise_invalid_args_exception( # noqa: D103
sample_drift_data, sample_drift_metadata
):
ref_data = pd.DataFrame(columns=sample_drift_data.columns)
calc = SimpleDriftCalculator(sample_drift_metadata)
with pytest.raises(InvalidArgumentsException):
calc.fit(ref_data)
def test_base_drift_calculator_given_empty_analysis_data_should_raise_invalid_args_exception( # noqa: D103
sample_drift_data, sample_drift_metadata
):
calc = SimpleDriftCalculator(sample_drift_metadata, chunk_size=1000)
with pytest.raises(InvalidArgumentsException):
calc.calculate(data=pd.DataFrame(columns=sample_drift_data.columns))
def test_base_drift_calculator_given_empty_features_list_should_calculate_for_all_features( # noqa: D103
sample_drift_data, sample_drift_metadata
):
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = SimpleDriftCalculator(sample_drift_metadata, chunk_size=1000).fit(ref_data)
sut = calc.calculate(data=sample_drift_data)
md = extract_metadata(sample_drift_data, model_name='model', model_type='classification_binary')
assert len(sut.data.columns) == len(md.features)
for f in md.features:
assert f.column_name in sut.data.columns
def test_base_drift_calculator_given_non_empty_features_list_should_only_calculate_for_these_features( # noqa: D103
sample_drift_data, sample_drift_metadata
):
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = SimpleDriftCalculator(sample_drift_metadata, features=['f1', 'f3'], chunk_size=1000).fit(ref_data)
_ = calc.calculate(data=sample_drift_data)
sut = calc.calculate(data=sample_drift_data)
assert len(sut.data.columns) == 2
assert 'f1' in sut.data.columns
assert 'f3' in sut.data.columns
def test_base_drift_calculator_uses_size_based_chunker_when_given_chunk_size( # noqa: D103
sample_drift_data, sample_drift_metadata
):
class TestDriftCalculator(DriftCalculator):
def fit(self, reference_data: pd.DataFrame) -> DriftCalculator:
return self
def calculate(self, data: pd.DataFrame) -> pd.DataFrame:
data = preprocess(data, self.model_metadata)
features_and_metadata = NML_METADATA_COLUMNS + self.selected_features
chunks = self.chunker.split(data, columns=features_and_metadata, minimum_chunk_size=500)
chunk_keys = [c.key for c in chunks]
return pd.DataFrame({'keys': chunk_keys})
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = TestDriftCalculator(sample_drift_metadata, chunk_size=1000).fit(ref_data)
sut = calc.calculate(sample_drift_data)['keys']
expected = [
c.key
for c in SizeBasedChunker(1000).split(sample_drift_metadata.enrich(sample_drift_data), minimum_chunk_size=1)
]
assert len(expected) == len(sut)
assert sorted(expected) == sorted(sut)
def test_base_drift_calculator_uses_count_based_chunker_when_given_chunk_number( # noqa: D103
sample_drift_data, sample_drift_metadata
):
class TestDriftCalculator(DriftCalculator):
def fit(self, reference_data: pd.DataFrame) -> DriftCalculator:
self._suggested_minimum_chunk_size = 50
return self
def calculate(self, data: pd.DataFrame) -> pd.DataFrame:
data = preprocess(data, self.model_metadata)
features_and_metadata = NML_METADATA_COLUMNS + self.selected_features
chunks = self.chunker.split(data, columns=features_and_metadata, minimum_chunk_size=500)
chunk_keys = [c.key for c in chunks]
return pd.DataFrame({'keys': chunk_keys})
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = TestDriftCalculator(sample_drift_metadata, chunk_number=100).fit(ref_data)
sut = calc.calculate(sample_drift_data)['keys']
assert 101 == len(sut)
def test_base_drift_calculator_uses_period_based_chunker_when_given_chunk_period( # noqa: D103
sample_drift_data, sample_drift_metadata
):
class TestDriftCalculator(DriftCalculator):
def fit(self, reference_data: pd.DataFrame) -> DriftCalculator:
return self
def calculate(self, data: pd.DataFrame) -> pd.DataFrame:
data = preprocess(data, self.model_metadata)
features_and_metadata = NML_METADATA_COLUMNS + self.selected_features
chunks = self.chunker.split(data, columns=features_and_metadata, minimum_chunk_size=500)
chunk_keys = [c.key for c in chunks]
return pd.DataFrame({'keys': chunk_keys})
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = TestDriftCalculator(sample_drift_metadata, chunk_period='W').fit(ref_data)
sut = calc.calculate(sample_drift_data)['keys']
assert 20 == len(sut)
def test_base_drift_calculator_uses_default_chunker_when_no_chunker_specified( # noqa: D103
sample_drift_data, sample_drift_metadata
):
class TestDriftCalculator(DriftCalculator):
def fit(self, reference_data: pd.DataFrame) -> DriftCalculator:
return self
def calculate(self, data: pd.DataFrame) -> pd.DataFrame:
data = preprocess(data, self.model_metadata)
features_and_metadata = NML_METADATA_COLUMNS + self.selected_features
chunks = self.chunker.split(data, columns=features_and_metadata, minimum_chunk_size=500)
chunk_keys = [c.key for c in chunks]
return pd.DataFrame({'keys': chunk_keys})
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = TestDriftCalculator(sample_drift_metadata).fit(ref_data)
sut = calc.calculate(sample_drift_data)['keys']
expected = [
c.key for c in DefaultChunker().split(sample_drift_metadata.enrich(sample_drift_data), minimum_chunk_size=500)
]
assert len(expected) == len(sut)
assert sorted(expected) == sorted(sut)
def test_univariate_statistical_drift_calc_raises_missing_metadata_exception_when_predicted_proba_not_set( # noqa: D103
sample_drift_data, sample_drift_metadata
):
sample_drift_metadata.predicted_probability_column_name = None
with pytest.raises(MissingMetadataException, match="missing value for 'predicted_probability_column_name'"):
_ = UnivariateStatisticalDriftCalculator(sample_drift_metadata, chunk_size=5000)
@pytest.mark.parametrize(
'chunker',
[
(PeriodBasedChunker(offset='W')),
(PeriodBasedChunker(offset='M')),
(SizeBasedChunker(chunk_size=1000)),
CountBasedChunker(chunk_count=25),
],
ids=['chunk_period_weekly', 'chunk_period_monthly', 'chunk_size_1000', 'chunk_count_25'],
)
def test_univariate_statistical_drift_calculator_should_return_a_row_for_each_analysis_chunk_key( # noqa: D103
sample_drift_data, sample_drift_metadata, chunker
):
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = UnivariateStatisticalDriftCalculator(sample_drift_metadata, chunker=chunker).fit(ref_data)
sut = calc.calculate(data=sample_drift_data)
chunks = chunker.split(sample_drift_metadata.enrich(sample_drift_data))
assert len(chunks) == sut.data.shape[0]
chunk_keys = [c.key for c in chunks]
assert 'key' in sut.data.columns
assert sorted(chunk_keys) == sorted(sut.data['key'].values)
def test_univariate_statistical_drift_calculator_should_contain_chunk_details( # noqa: D103
sample_drift_data, sample_drift_metadata
):
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = UnivariateStatisticalDriftCalculator(sample_drift_metadata, chunk_period='W').fit(ref_data)
drift = calc.calculate(data=sample_drift_data)
sut = drift.data.columns
assert 'key' in sut
assert 'start_index' in sut
assert 'start_date' in sut
assert 'end_index' in sut
assert 'end_date' in sut
assert 'partition' in sut
def test_univariate_statistical_drift_calculator_returns_stat_column_and_p_value_column_for_each_feature( # noqa: D103
sample_drift_data, sample_drift_metadata
):
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = UnivariateStatisticalDriftCalculator(sample_drift_metadata, chunk_size=1000).fit(ref_data)
sut = calc.calculate(data=sample_drift_data).data.columns
for f in sample_drift_metadata.features:
if f.feature_type == FeatureType.CONTINUOUS:
assert f'{f.column_name}_dstat' in sut
else:
assert f'{f.column_name}_chi2' in sut
assert f'{f.column_name}_p_value' in sut
assert f'{sample_drift_metadata.predicted_probability_column_name}_dstat' in sut
def test_univariate_statistical_drift_calculator(sample_drift_data, sample_drift_metadata): # noqa: D103
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
analysis_data = sample_drift_data.loc[sample_drift_data['partition'] == 'analysis']
calc = UnivariateStatisticalDriftCalculator(sample_drift_metadata, chunk_period='W').fit(ref_data)
try:
_ = calc.calculate(data=analysis_data)
except Exception:
pytest.fail()
def test_statistical_drift_calculator_deals_with_missing_class_labels( # noqa: D103
sample_drift_data, sample_drift_metadata
):
# rig the data by setting all f3-values in first analysis chunk to 0
sample_drift_data.loc[10080:16000, 'f3'] = 0
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
analysis_data = sample_drift_data.loc[sample_drift_data['partition'] == 'analysis']
calc = UnivariateStatisticalDriftCalculator(sample_drift_metadata, chunk_size=5000).fit(ref_data)
results = calc.calculate(data=analysis_data)
assert not np.isnan(results.data.loc[0, 'f3_chi2'])
assert not np.isnan(results.data.loc[0, 'f3_p_value'])
def test_data_reconstruction_drift_calculator_with_params_should_not_fail( # noqa: D103
sample_drift_data, sample_drift_metadata
):
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = DataReconstructionDriftCalculator(sample_drift_metadata, n_components=0.75, chunk_period='W').fit(ref_data)
try:
drift = calc.calculate(data=sample_drift_data)
print(drift)
except Exception:
pytest.fail()
def test_data_reconstruction_drift_calculator_with_default_params_should_not_fail( # noqa: D103
sample_drift_data, sample_drift_metadata
):
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = DataReconstructionDriftCalculator(sample_drift_metadata, chunk_period='W').fit(ref_data)
try:
drift = calc.calculate(data=sample_drift_data)
print(drift)
except Exception:
pytest.fail()
def test_data_reconstruction_drift_calculator_with_default_params_should_not_fail_w_nans( # noqa: D103
sample_drift_data_with_nans, sample_drift_metadata
):
ref_data = sample_drift_data_with_nans.loc[sample_drift_data_with_nans['partition'] == 'reference']
calc = DataReconstructionDriftCalculator(sample_drift_metadata, chunk_period='W').fit(ref_data)
try:
drift = calc.calculate(data=sample_drift_data_with_nans)
print(drift)
except Exception:
pytest.fail()
def test_data_reconstruction_drift_calculator_should_contain_chunk_details_and_single_drift_value_column( # noqa: D103
sample_drift_data, sample_drift_metadata
):
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = DataReconstructionDriftCalculator(sample_drift_metadata, chunk_period='W').fit(ref_data)
drift = calc.calculate(data=sample_drift_data)
sut = drift.data.columns
assert len(sut) == 10
assert 'key' in sut
assert 'start_index' in sut
assert 'start_date' in sut
assert 'end_index' in sut
assert 'end_date' in sut
assert 'partition' in sut
assert 'upper_threshold' in sut
assert 'lower_threshold' in sut
assert 'alert' in sut
assert 'reconstruction_error' in sut
def test_data_reconstruction_drift_calculator_should_contain_a_row_for_each_chunk( # noqa: D103
sample_drift_data, sample_drift_metadata
):
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = DataReconstructionDriftCalculator(sample_drift_metadata, chunk_period='W').fit(ref_data)
drift = calc.calculate(data=sample_drift_data)
sample_drift_data = sample_drift_metadata.enrich(sample_drift_data)
expected = len(PeriodBasedChunker(offset='W').split(sample_drift_data, minimum_chunk_size=1))
sut = len(drift.data)
assert sut == expected
# TODO: find a better way to test this
def test_data_reconstruction_drift_calculator_should_not_fail_when_using_feature_subset( # noqa: D103
sample_drift_data, sample_drift_metadata
):
calc = DataReconstructionDriftCalculator(
model_metadata=sample_drift_metadata, features=['f1', 'f4'], chunk_period='W'
)
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
try:
calc.fit(ref_data)
calc.calculate(sample_drift_data)
except Exception as exc:
pytest.fail(f"should not have failed but got {exc}")
def test_data_reconstruction_drift_calculator_numeric_results(sample_drift_data, sample_drift_metadata): # noqa: D103
ref_data = sample_drift_data.loc[sample_drift_data['partition'] == 'reference']
calc = DataReconstructionDriftCalculator(sample_drift_metadata, chunk_period='W').fit(ref_data)
drift = calc.calculate(data=sample_drift_data)
expected_drift = pd.DataFrame.from_dict(
{
'key': [
'2020-01-06/2020-01-12',
'2020-01-13/2020-01-19',
'2020-01-20/2020-01-26',
'2020-01-27/2020-02-02',
'2020-02-03/2020-02-09',
'2020-02-10/2020-02-16',
'2020-02-17/2020-02-23',
'2020-02-24/2020-03-01',
'2020-03-02/2020-03-08',
'2020-03-09/2020-03-15',
'2020-03-16/2020-03-22',
'2020-03-23/2020-03-29',
'2020-03-30/2020-04-05',
'2020-04-06/2020-04-12',
'2020-04-13/2020-04-19',
'2020-04-20/2020-04-26',
'2020-04-27/2020-05-03',
'2020-05-04/2020-05-10',
'2020-05-11/2020-05-17',
'2020-05-18/2020-05-24',
],
'reconstruction_error': [
0.795939312162986,
0.7840110463966236,
0.8119098730091425,
0.7982130082187159,
0.807815521612754,
0.8492042669464963,
0.7814127409090083,
0.8022621626300768,
0.8104742129966831,
0.7703901270625767,
0.8007070128606296,
0.7953169982962172,
0.7862784182468701,
0.838376989270861,
0.8019280640410021,
0.7154339372837247,
0.7171169593894968,
0.7255999561968017,
0.73493013255886,
0.7777717388501538,
],
}
)
| pd.testing.assert_frame_equal(expected_drift, drift.data[['key', 'reconstruction_error']]) | pandas.testing.assert_frame_equal |
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.index import Index, Int64Index
from pandas.tseries.frequencies import infer_freq, to_offset
from pandas.tseries.offsets import DateOffset, generate_range, Tick
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if isinstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
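# Illustrative use of the factory above (names and field codes here are
# assumptions, not taken from this file): a datetime index class can expose
# calendar fields as read-only properties, e.g.
#   year = _field_accessor('year', 'Y')
#   month = _field_accessor('month', 'M')
#   day = _field_accessor('day', 'D')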
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if isinstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if isinstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, datetime):
func = getattr(self, opname)
result = func(_to_m8(other))
elif isinstance(other, np.ndarray):
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if isinstance(other, timedelta):
func = getattr(self, opname)
return func(np.timedelta64(other))
else:
func = getattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
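# Illustrative wiring for the arithmetic wrapper above (hypothetical; it
# mirrors how the comparison wrappers are attached inside DatetimeIndex below):
#   __add__ = _dt_index_op('__add__')
#   __sub__ = _dt_index_op('__sub__')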
class TimeSeriesError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is None, generated index will extend to first conforming
time on or just past end argument
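Examples
--------
A minimal sketch of the two construction modes described above (illustrative):

>>> DatetimeIndex(start='1/1/2000', periods=3, freq='D')
>>> DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], freq='infer')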
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_groupby = lib.groupby_arrays # _wrap_i8_function(lib.groupby_int64)
_arrmap = _wrap_dt_function(_algos.arrmap_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not isinstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if isinstance(freq, basestring):
freq = to_offset(freq)
else:
if isinstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, datetime):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = | tools.to_datetime(data) | pandas.tseries.tools.to_datetime |
import sys
import time
import math
import warnings
import numpy as np
import pandas as pd
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from fmlc.triggering import triggering
from fmlc.baseclasses import eFMU
from fmlc.stackedclasses import controller_stack
class testcontroller1(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init= False
self.output['c'] = self.input['a'] * self.input['b']
return 'testcontroller1 did a computation!'
class testcontroller2(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(0.2)
return 'testcontroller2 did a computation!'
class testcontroller3(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(1)
return 'testcontroller3 did a computation!'
class testcontroller4(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(10)
return 'testcontroller4 did a computation!'
def test_sampletime():
'''This tests if the sample time is working properly'''
controller = {}
controller['forecast1'] = {'function': testcontroller1, 'sampletime': 3}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True)
now = time.time()
while time.time() - now < 10:
controller.query_control(time.time())
df = pd.DataFrame(controller.log_to_df()['forecast1'])
assert df.shape[0] == 5
for i in (np.diff(df.index) / np.timedelta64(1, 's'))[1:]:
assert(math.isclose(i, 3, rel_tol=0.01))
def test_normal():
controller = {}
controller['forecast1'] = {'function':testcontroller1, 'sampletime':1}
controller['mpc1'] = {'function':testcontroller2, 'sampletime':'forecast1'}
controller['control1'] = {'function':testcontroller1, 'sampletime':'mpc1'}
controller['forecast2'] = {'function':testcontroller3, 'sampletime':2}
controller['forecast3'] = {'function':testcontroller1, 'sampletime': 1}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
mapping['forecast2_a'] = 20
mapping['forecast2_b'] = 4
mapping['forecast3_a'] = 30
mapping['forecast3_b'] = 4
mapping['mpc1_a'] = 'forecast1_c'
mapping['mpc1_b'] = 'forecast1_a'
mapping['control1_a'] = 'mpc1_c'
mapping['control1_b'] = 'mpc1_a'
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=2, workers=100)
controller.run_query_control_for(5)
df1 = pd.DataFrame(controller.log_to_df()['forecast1'])
df2 = pd.DataFrame(controller.log_to_df()['forecast2'])
df3 = pd.DataFrame(controller.log_to_df()['forecast3'])
df4 = pd.DataFrame(controller.log_to_df()['mpc1'])
df5 = pd.DataFrame(controller.log_to_df()['control1'])
# Check number of records
assert df1.shape[0] == 7
assert df2.shape[0] == 4
assert df3.shape[0] == 7
assert df4.shape[0] == 7
assert df5.shape[0] == 7
# Check contents of records
assert pd.isna(df1['a'][0])
assert pd.isna(df1['b'][0])
assert pd.isna(df1['c'][0])
assert pd.isna(df2['a'][0])
assert pd.isna(df2['b'][0])
assert pd.isna(df2['c'][0])
assert pd.isna(df3['a'][0])
assert pd.isna(df3['b'][0])
assert pd.isna(df3['c'][0])
assert pd.isna(df4['a'][0])
assert pd.isna(df4['b'][0])
assert pd.isna(df4['c'][0])
assert pd.isna(df5['a'][0])
assert pd.isna(df5['b'][0])
assert pd.isna(df5['c'][0])
assert list(df1['a'])[1:] == [10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
assert list(df1['b'])[1:] == [4.0, 4.0, 4.0, 4.0, 4.0, 4.0]
assert list(df1['c'])[1:] == [40.0, 40.0, 40.0, 40.0, 40.0, 40.0]
assert list(df2['a'])[1:] == [20.0, 20.0, 20.0]
assert list(df2['b'])[1:] == [4.0, 4.0, 4.0]
assert list(df2['c'])[1:] == [80.0, 80.0, 80.0]
assert list(df3['a'])[1:] == [30.0, 30.0, 30.0, 30.0, 30.0, 30.0]
assert list(df3['b'])[1:] == [4.0, 4.0, 4.0, 4.0, 4.0, 4.0]
assert list(df3['c'])[1:] == [120.0, 120.0, 120.0, 120.0, 120.0, 120.0]
assert list(df4['a'])[1:] == [40.0, 40.0, 40.0, 40.0, 40.0, 40.0]
assert list(df4['b'])[1:] == [10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
assert list(df4['c'])[1:] == [400.0, 400.0, 400.0, 400.0, 400.0, 400.0]
assert list(df5['a'])[1:] == [400.0, 400.0, 400.0, 400.0, 400.0, 400.0]
assert list(df5['b'])[1:] == [40.0, 40.0, 40.0, 40.0, 40.0, 40.0]
assert list(df5['c'])[1:] == [16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]
assert list(df1['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
assert list(df2['logging']) == ['Initialize', 'testcontroller3 did a computation!', 'testcontroller3 did a computation!', 'testcontroller3 did a computation!']
assert list(df3['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
assert list(df4['logging']) == ['Initialize', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!']
assert list(df5['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
def test_stuckController():
'''This tests if the timeout controllers can be caught'''
## CASE1: mpc1 stuck
controller = {}
controller['forecast1'] = {'function':testcontroller1, 'sampletime':1}
controller['mpc1'] = {'function':testcontroller4, 'sampletime':'forecast1'}
controller['control1'] = {'function':testcontroller1, 'sampletime':'mpc1'}
controller['forecast2'] = {'function':testcontroller1, 'sampletime':1}
controller['forecast3'] = {'function':testcontroller1, 'sampletime':1}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
mapping['forecast2_a'] = 20
mapping['forecast2_b'] = 4
mapping['forecast3_a'] = 30
mapping['forecast3_b'] = 4
mapping['mpc1_a'] = 'forecast1_c'
mapping['mpc1_b'] = 'forecast1_a'
mapping['control1_a'] = 'mpc1_c'
mapping['control1_b'] = 'mpc1_a'
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=0.5, workers=100)
# Catch warning.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
controller.run_query_control_for(2)
assert len(w) == 3
assert "timeout" in str(w[-1].message)
df1 = pd.DataFrame(controller.log_to_df()['forecast1'])
df2 = pd.DataFrame(controller.log_to_df()['forecast2'])
df3 = pd.DataFrame(controller.log_to_df()['forecast3'])
df4 = pd.DataFrame(controller.log_to_df()['mpc1'])
df5 = pd.DataFrame(controller.log_to_df()['control1'])
# Check number of records
assert df1.shape[0] == 4
assert df2.shape[0] == 4
assert df3.shape[0] == 4
#assert df4.shape[0] == 1
assert df5.shape[0] == 1
#assert len(df4.columns) == 1
assert len(df5.columns) == 1
# Check contents of records
assert pd.isna(df1['a'][0])
assert pd.isna(df1['b'][0])
assert pd.isna(df1['c'][0])
assert pd.isna(df2['a'][0])
assert pd.isna(df2['b'][0])
assert pd.isna(df2['c'][0])
assert pd.isna(df3['a'][0])
assert | pd.isna(df3['b'][0]) | pandas.isna |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = | pd.array(expected, dtype="bool") | pandas.array |
"""
This creates Figure S1 - Full Cytokine plots
"""
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import pearsonr
from tfac.dataImport import form_tensor, import_cytokines
from .common import getSetup
def fig_S1_setup():
tensor, _, patInfo = form_tensor()
plasma, _ = import_cytokines()
cytokines = plasma.index
tensor = tensor.T
patInfo = patInfo.T
serum_slice = tensor[0, :, :]
plasma_slice = tensor[1, :, :]
test = pd.concat([pd.DataFrame(serum_slice), | pd.DataFrame(plasma_slice) | pandas.DataFrame |
#!/usr/bin/env python
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from scipy.spatial.distance import pdist, squareform
from sklearn import datasets, preprocessing, manifold
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from glob import glob
import pandas
import umap
import tempfile
import shutil
import argparse
import json
import re
import sys
import os
# Derive stop words and stemmer once
stop_words = set(stopwords.words("english"))
stemmer = PorterStemmer()
def get_parser():
parser = argparse.ArgumentParser(
description="Spack Monitor Analyser",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--data_dir",
help="Directory with data",
default=os.path.join(os.getcwd(), "data"),
)
return parser
def process_text(text):
"""
Process text, including:
1. Lowercase
2. Remove numbers and punctuation
3. Strip whitespace
4. Tokenize and stop word removal
5. Stemming (currently skipped; see the note near the end of this function)
"""
# Make lowercase
text = text.lower()
# Remove numbers and punctuation (but leave path separator for now)
text = re.sub(r"\d+", "", text)
text = re.sub(r"[^\w\s\/]", "", text)
# Strip whitespace
text = text.strip()
# tokenize and stop word removal
tokens = [x for x in word_tokenize(text) if not x in stop_words]
# Error output frequently contains file paths, which carry no signal here,
# so drop any token that looks like a path.
tokens = [x for x in tokens if os.sep not in x]
# Don't do stemming here - the error messages are usually hard coded / consistent
# words = [stemmer.stem(t) for t in tokens]
return tokens
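# A quick illustration of the pipeline above on a hypothetical error string:
#
#   process_text("Error 2: /spack/lib/libfoo.so NOT found")
#
# lowercases the text, strips digits and punctuation, removes stop words such
# as "not", and drops the path-like token, leaving roughly ['error', 'found'].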
def write_json(content, filename):
with open(filename, "w") as fd:
fd.write(json.dumps(content, indent=4))
def read_json(filename):
with open(filename, "r") as fd:
content = json.loads(fd.read())
return content
def build_model(texts, name, outdir):
# Train Doc2Vec with 50-dimensional vectors, doing 40 passes (epochs) over the corpus
model = Doc2Vec(texts, vector_size=50, min_count=5, workers=4, epochs=40)
# Save the model if we need again
model.save(os.path.join(outdir, "model.%s.doc2vec" % name))
# Create a vector for each document
# UIDS as id for each row, vectors across columns
df = pandas.DataFrame(columns=range(50))
print("Generating vector matrix for documents...")
for text in texts:
df.loc[text.tags[0]] = model.infer_vector(text.words)
# Save dataframe to file
df.to_csv(os.path.join(outdir, "%s-vectors.csv" % name))
# Create a distance matrix
distance = pandas.DataFrame(
squareform(pdist(df)), index=list(df.index), columns=list(df.index)
)
distance.to_csv(os.path.join(outdir, "%s-software-distances.csv" % name))
# Try umap first...
reducer = umap.UMAP()
embedding = reducer.fit_transform(distance)
emb = | pandas.DataFrame(embedding, index=distance.index, columns=["x", "y"]) | pandas.DataFrame |
import pandas as pd
from pandas import DataFrame
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import f_regression
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR, LinearSVR
from metalfi.src.data.dataset import Dataset
from metalfi.src.data.memory import Memory
from metalfi.src.data.metadataset import MetaDataset
from metalfi.src.model.evaluation import Evaluation
from metalfi.src.model.featureselection import MetaFeatureSelection
from metalfi.src.model.metamodel import MetaModel
class Controller:
def __init__(self):
self.__train_data = None
self.__data_names = None
self.__meta_data = list()
self.fetchData()
self.storeMetaData()
self.__targets = ["linSVC_SHAP", "LOG_SHAP", "RF_SHAP", "NB_SHAP", "SVC_SHAP",
"linSVC_LIME", "LOG_LIME", "RF_LIME", "NB_LIME", "SVC_LIME",
"linSVC_PIMP", "LOG_PIMP", "RF_PIMP", "NB_PIMP", "SVC_PIMP",
"linSVC_LOFO", "LOG_LOFO", "RF_LOFO", "NB_LOFO", "SVC_LOFO"]
self.__meta_models = [(RandomForestRegressor(n_estimators=100, n_jobs=4), "RF"),
(SVR(), "SVR"),
(LinearRegression(n_jobs=4), "LIN"),
(LinearSVR(dual=True, max_iter=10000), "linSVR")]
def getTrainData(self):
return self.__train_data
def fetchData(self):
data_frame, target = Memory.loadTitanic()
data_1 = Dataset(data_frame, target)
data_frame_2, target_2 = Memory.loadCancer()
data_2 = Dataset(data_frame_2, target_2)
data_frame_3, target_3 = Memory.loadIris()
data_3 = Dataset(data_frame_3, target_3)
data_frame_4, target_4 = Memory.loadWine()
data_4 = Dataset(data_frame_4, target_4)
data_frame_5, target_5 = Memory.loadBoston()
data_5 = Dataset(data_frame_5, target_5)
open_ml = [(Dataset(data_frame, target), name) for data_frame, name, target in Memory.loadOpenML()]
self.__train_data = [(data_1, "Titanic"), (data_2, "Cancer"), (data_3, "Iris"), (data_4, "Wine"),
(data_5, "Boston")] + open_ml
self.__data_names = dict({})
i = 0
for data, name in self.__train_data:
self.__data_names[name] = i
i += 1
def storeMetaData(self):
for dataset, name in self.__train_data:
if not (Memory.getPath() / ("input/" + name + "meta.csv")).is_file():
print("meta-data calc.: " + name)
meta = MetaDataset(dataset, True)
data = meta.getMetaData()
d_times, t_times = meta.getTimes()
nr_feat, nr_inst = meta.getNrs()
Memory.storeInput(data, name)
Memory.storeDataFrame(DataFrame(data=d_times, index=["Time"], columns=[x for x in d_times]),
name + "XmetaX" + str(nr_feat) + "X" + str(nr_inst), "runtime")
Memory.storeDataFrame(DataFrame(data=t_times, index=["Time"], columns=[x for x in t_times]),
name + "XtargetX" + str(nr_feat) + "X" + str(nr_inst), "runtime")
def loadMetaData(self):
for dataset, name in self.__train_data:
sc = StandardScaler()
data = Memory.load(name + "meta.csv", "input")
fmf = [x for x in data.columns if "." not in x]
dmf = [x for x in data.columns if "." in x]
X_f = DataFrame(data=sc.fit_transform(data[fmf]), columns=fmf)
X_d = DataFrame(data=data[dmf], columns=dmf)
data_frame = pd.concat([X_d, X_f], axis=1)
self.__meta_data.append((data_frame, name))
def selectMetaFeatures(self, meta_model_name="", memory=False):
sets = None
if memory:
sets = Memory.loadMetaFeatures()
if sets is None:
data = [d for d, n in self.__meta_data if n != meta_model_name]
fs = MetaFeatureSelection(pd.concat(data), self.__targets)
sets = {}
for meta_model, name in self.__meta_models:
print("Select meta-features: " + name)
tree = (name == "RF")
percentiles = [10]
if memory:
tree = False
percentiles = [25]
sets[name] = fs.select(meta_model, f_regression, percentiles, k=5, tree=tree)
if memory:
Memory.storeMetaFeatures(sets)
return sets
def trainMetaModel(self):
self.loadMetaData()
for i in range(0, len(self.__meta_data)):
test_data, test_name = self.__meta_data[i]
train_data = list()
for j in range(0, len(self.__meta_data)):
if not (i == j):
train_data.append(self.__meta_data[j][0])
path = Memory.getPath() / ("model/" + test_name)
if not path.is_file():
print("Train meta-model: " + test_name)
og_data, name = self.__train_data[self.__data_names[test_name]]
model = MetaModel(pd.concat(train_data), test_name + "meta",
test_data, og_data, self.selectMetaFeatures(test_name),
self.__meta_models, self.__targets)
model.fit()
Memory.storeModel(model, test_name, None)
def evaluate(self, names):
evaluation = Evaluation(names)
evaluation.predictions()
def questions(self, names, offset):
evaluation = Evaluation(names)
evaluation.questions(names[:offset])
def compare(self, names):
evaluation = Evaluation(names)
evaluation.comparisons(["linSVR"],
["linSVC_SHAP", "LOG_SHAP", "RF_SHAP", "NB_SHAP", "SVC_SHAP"], ["LM"], False)
def metaFeatureImportances(self):
data = [d for d, _ in self.__meta_data]
models = [(RandomForestRegressor(n_estimators=50, n_jobs=4), "RF", "tree"),
(SVR(), "SVR", "kernel"),
(LinearRegression(n_jobs=4), "LIN", "linear"),
(LinearSVR(max_iter=1000), "linSVR", "linear")]
targets = ["linSVC_SHAP", "LOG_SHAP", "RF_SHAP", "NB_SHAP", "SVC_SHAP"]
importance = MetaFeatureSelection.metaFeatureImportance( | pd.concat(data) | pandas.concat |
# %%
import os
import sys
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import navis
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import connectome_tools.process_matrix as pm
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
rm = pymaid.CatmaidInstance(url, token, name, password)
adj = pd.read_csv('VNC_interaction/data/axon-dendrite.csv', header = 0, index_col = 0)
inputs = pd.read_csv('VNC_interaction/data/input_counts.csv', index_col = 0)
inputs = | pd.DataFrame(inputs.values, index = inputs.index, columns = ['axon_input', 'dendrite_input']) | pandas.DataFrame |
import numpy as np
import pandas as pd;
# python list
data = [1,2,3,4,5];
# numpy
ndata = np.array(data);
# pandas
pdata = pd.Series(data);
print(pdata[0]);
print(pdata.values);
print(pdata.index);
pdata2 = pd.Series(data, index=['A','B','C','D','E']);
print(pdata2);
print(pdata2['C']);
# dic
data2 = {'name':'kim','ko':100,'en':90,'ma':80, 'si':100};
pdata3 = | pd.Series(data2) | pandas.Series |
"""Functions related to SCOP classes of structures."""
import os
import pandas as pd
SCOP_CLA_LATEST_FILE = 'atom3d/data/metadata/scop-cla-latest.txt'
PDB_CHAIN_SCOP2_UNIPROT_FILE = 'atom3d/data/metadata/pdb_chain_scop2_uniprot.csv'
PDB_CHAIN_SCOP2B_SF_UNIPROT_FILE = 'atom3d/data/metadata/pdb_chain_scop2b_sf_uniprot.csv'
def get_scop_index():
"""Get index mapping from PDB code and chain to SCOP classification."""
# Load core SCOP database. Mapping from domains to classification.
scop = pd.read_csv(
SCOP_CLA_LATEST_FILE, skiprows=6, delimiter=' ',
names=['fa-domid', 'fa-pdbid', 'fa-pdbreg', 'fa-uniid', 'fa-unireg',
'sf-domid', 'sf-pdbid', 'sf-pdbreg', 'sf-uniid', 'sf-unireg',
'scop'])
scop['pdb_code'] = scop['fa-pdbid'].apply(lambda x: x.lower())
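    # The SCOP-CLA classification string has the form (illustrative values):
    #   "TP=1,CL=1000003,CF=2000031,SF=3000054,FA=4000074"
    # so the comma-separated key=value pairs are unpacked below into
    # type/class/fold/superfamily/family identifiers.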
scop['type'] = \
scop['scop'].apply(lambda x: int(x.split(',')[0].split('=')[1]))
scop['class'] = \
scop['scop'].apply(lambda x: int(x.split(',')[1].split('=')[1]))
scop['fold'] = \
scop['scop'].apply(lambda x: int(x.split(',')[2].split('=')[1]))
scop['superfamily'] = \
scop['scop'].apply(lambda x: int(x.split(',')[3].split('=')[1]))
scop['family'] = \
scop['scop'].apply(lambda x: int(x.split(',')[4].split('=')[1]))
del scop['scop']
# Load mapping of representatives to scop domains.
scop2_uniprot = pd.read_csv(
PDB_CHAIN_SCOP2_UNIPROT_FILE, skiprows=2,
names=['pdb_code', 'chain', 'sp-primary', 'sf-domid', 'fa-domid'],
)
# Some superfamily entries are a bit messed up here, so we remove.
scop2_uniprot = scop2_uniprot[scop2_uniprot['sf-domid'].str.isnumeric()]
scop2_uniprot['sf-domid'] = | pd.to_numeric(scop2_uniprot['sf-domid']) | pandas.to_numeric |
"""Excel Model"""
__docformat__ = "numpy"
# pylint: disable=abstract-class-instantiated
import logging
import pandas as pd
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def load_configuration(excel_file: str):
"""
Load in the Excel file to determine the configuration that needs to be set.
Parameters
----------
excel_file: str
The location of the Excel file that needs to be loaded.
Returns
-------
configuration: dictionary
Returns a dictionary with the configurations set.
"""
# Read in the Optimization template
df = pd.read_excel(
excel_file,
sheet_name="Optimization",
skiprows=2,
usecols="B:D",
names=["Parameter", "Value", "Description"],
index_col="Parameter",
)
# Remove completely empty NaN rows
cleaned_df = df.dropna(axis="rows", thresh=2)
# Filter out any general columns
filtered_df = cleaned_df[cleaned_df["Description"] != "Description"]
# Convert to Dictionary
configuration = filtered_df.to_dict()
return configuration["Value"], configuration["Description"]
@log_start_end(log=logger)
def load_allocation(excel_file: str):
"""
Load in the Excel file to determine the allocation that needs to be set.
Parameters
----------
excel_file: str
The location of the Excel file that needs to be loaded.
Returns
-------
tickers: list
Returns a list with ticker symbols
categories: dictionary
Returns a dictionary that specifies each category
"""
if str(excel_file).endswith(".xlsx"):
categories = pd.read_excel(excel_file, sheet_name="Allocation", usecols="A:G")
categories = categories.dropna(axis="rows")
elif str(excel_file).endswith(".csv"):
categories = pd.read_csv(excel_file)
categories = categories.dropna(axis="rows")
else:
console.print("Only Excel (.xlsx and .csv) files are accepted.\n")
return [], {}
categories.columns = [
col.upper().strip().replace(" ", "_") for col in categories.columns
]
categories = categories.apply(lambda x: x.astype(str).str.upper())
categories = categories[~categories.index.duplicated(keep="first")]
try:
categories.set_index("TICKER", inplace=True)
categories.sort_index(inplace=True)
except KeyError:
console.print("Allocation table needs a TICKER column\n")
return [], {}
tickers = list(categories.index)
tickers.sort()
categories = categories.to_dict()
return tickers, categories
@log_start_end(log=logger)
def load_bl_views(excel_file: str):
"""
Load an Excel file with views for the Black Litterman model.
Parameters
----------
excel_file: str
The location of the Excel file that needs to be loaded.
Returns
-------
p_views: list
Returns a list with p_views matrix
q_views: list
Returns a list with q_views matrix
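Notes
-----
Illustrative encoding (hypothetical numbers): with three assets, a single
relative view "asset 1 outperforms asset 2 by 5%" corresponds to
p_views = [[1, -1, 0]] and q_views = [0.05].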
"""
if str(excel_file).endswith(".xlsx"):
try:
p_views = pd.read_excel(excel_file, sheet_name="p_views", index_col=0)
p_views = p_views.fillna(0)
p_views = p_views.dropna(axis="rows")
except KeyError:
console.print("Excel file needs a p_views sheet\n")
return {}, {}
try:
q_views = pd.read_excel(excel_file, sheet_name="q_views", index_col=0)
q_views = q_views.dropna(axis="rows")
except KeyError:
console.print("Excel file needs a p_views sheet\n")
return {}, {}
else:
console.print("Only Excel (.xlsx) files are accepted.\n")
return {}, {}
p_views = p_views.T.sort_index()
p_views = p_views.T.to_csv(index=False, header=0).replace("\n", ";")
p_views = p_views[:-1]
p_views = [[float(item) for item in row.split(",")] for row in p_views.split(";")]
q_views = q_views.to_csv(index=False, header=0).replace("\n", ",")
q_views = q_views[:-1]
q_views = [float(item) for item in q_views.split(",")]
return p_views, q_views
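# Shape sketch for the parsed views (values are illustrative, not taken from any
# template): with three assets and two views, p_views comes back as a 2x3 nested
# list and q_views as a flat list of two expected returns, e.g.
#     p_views = [[1.0, -1.0, 0.0], [0.0, 1.0, 0.0]]
#     q_views = [0.02, 0.05]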
@log_start_end(log=logger)
def excel_bl_views(file: str, stocks: str, n: int = 3):
"""
Create an Excel file with required format to build n views for Black Litterman cmd.
    Parameters
    ----------
    file: str
        The path the Excel workbook is written to (".xlsx" is appended if missing).
    stocks: str
        List of stocks used to build the Black Litterman model views.
    n: int
        The number of views that will be created.
    Returns
    -------
    file: excel
        An Excel workbook with empty p_views and q_views tables to be filled in.
"""
    if len(stocks) < 2:
        console.print("Please have at least 2 loaded tickers to create views.\n")
        return
p_views = [[""] * len(stocks) for i in range(n)]
p_views_df = pd.DataFrame(p_views, columns=stocks)
q_views = [[""] for i in range(n)]
q_views_df = pd.DataFrame(q_views, columns=["Returns"])
if file.endswith(".xlsx"):
pass
else:
file += ".xlsx"
with | pd.ExcelWriter(file) | pandas.ExcelWriter |
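# A hedged sketch of how the two frames would typically be written inside the
# ExcelWriter context manager opened above; the sheet names mirror what
# load_bl_views reads back and are an assumption, not taken from the truncated
# statement:
#     with pd.ExcelWriter(file) as writer:
#         p_views_df.to_excel(writer, sheet_name="p_views")
#         q_views_df.to_excel(writer, sheet_name="q_views")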
from typing import List
import numpy as np
import itertools
import sklearn.neighbors
import functools
import joblib
import psutil
import warnings
import pandas as pd
from numpy.typing import NDArray
from ..base import BaseExplainer
from ._cadex_parallel import compute_criterion # type: ignore
class ECE(BaseExplainer):
def __init__(self, k: int, columns: List[str], bces: List[BaseExplainer], dist: int, h: int,
lambda_: float, n_jobs=None):
self._col_names = columns
self.k = k
self.bces = bces
self.norm = dist
self.h = h
self.lambda_ = np.float32(lambda_)
if n_jobs is None:
self.n_jobs = psutil.cpu_count(logical=False)
else:
self.n_jobs = n_jobs
self._cfs_len: int
self._aggregated_cfs: NDArray[np.float32]
def _aggregate_cfs(self, x) -> NDArray[np.float32]:
list_cfs: List[NDArray[np.float32]] = []
for bce in self.bces:
bce_result: NDArray[np.float32] = np.asarray(bce.generate(x).values)
for bce_r in bce_result:
list_cfs.append(bce_r)
cfs: NDArray[np.float32] = np.unique(np.asarray(list_cfs), axis=0)
self._cfs_len = cfs.shape[0]
assert isinstance(cfs, np.ndarray)
return cfs
def _choose_best_k(self, valid_cfs: NDArray[np.float32], x_series):
x = x_series.values
norms: NDArray[np.float32] = np.apply_along_axis(functools.partial(np.linalg.norm, ord=self.norm),
0, valid_cfs)
C = list(valid_cfs / norms)
k = min(self.k, self._cfs_len)
if k != self.k:
warnings.warn(f'k parameter > number of aggregated counterfactuals. Changing k from {self.k} to {k}',
UserWarning, stacklevel=3)
if self._cfs_len <= self.h:
warnings.warn(
f"knn's h parameter >= number of aggregated counterfactuals. Changing h from {self.h} to {self._cfs_len - 1}",
UserWarning, stacklevel=3)
self.h = self._cfs_len - 1
k_subsets = list()
for i in range(k):
k_subsets += list(itertools.combinations(C, r=i + 1))
knn_c = sklearn.neighbors.KNeighborsClassifier(n_neighbors=k)
c_np: NDArray[np.float32] = np.asarray(C)
knn_c.fit(c_np, np.ones(shape=c_np.shape[0]))
S_ids = joblib.Parallel(n_jobs=self.n_jobs)(
joblib.delayed(compute_criterion)(knn_c, self.norm, self.lambda_, c_np, x, S) for S in k_subsets)
selected = norms * k_subsets[np.argmax(np.asarray(S_ids))]
return selected
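    # Sketch of the enumeration above (sizes are illustrative): with 4 aggregated
    # counterfactuals and k=2, k_subsets holds C(4,1) + C(4,2) = 10 candidate
    # subsets; compute_criterion scores each subset in parallel and the
    # argmax-scoring subset is rescaled by the column norms before being returned.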
def generate(self, x: pd.Series) -> pd.DataFrame:
self._aggregated_cfs = self._aggregate_cfs(x)
k_subset = self._choose_best_k(self._aggregated_cfs, x)
return | pd.DataFrame(k_subset, columns=self._col_names) | pandas.DataFrame |
#!/usr/bin/env python
# coding=utf-8
"""
@version: 0.1
@author: li
@file: factor_solvency.py
@time: 2019-01-28 11:33
"""
import gc, six
import json
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
@six.add_metaclass(Singleton)
class FactorSolvency(object):
"""
偿债能力
"""
def __init__(self):
__str__ = 'factor_solvency'
self.name = '财务指标'
self.factor_type1 = '财务指标'
self.factor_type2 = '偿债能力'
self.description = '财务指标的二级指标-偿债能力'
@staticmethod
def BondsToAsset(tp_solvency, factor_solvency, dependencies=['bonds_payable', 'total_assets']):
"""
:name: 应付债券与总资产之比
:desc: 应付债券MRQ/资产总计MRQ*100%
"""
management = tp_solvency.loc[:, dependencies]
management['BondsToAsset'] = np.where(
CalcTools.is_zero(management.total_assets.values), 0,
management.bonds_payable.values / management.total_assets.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, on="security_code")
return factor_solvency
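    # A hedged numeric sketch of the pattern shared by the factor methods in this
    # class (inputs are illustrative): for a security_code with bonds_payable=2e8
    # and total_assets=1e9, BondsToAsset evaluates to 0.2, the dependency columns
    # are dropped, and the single factor column is merged into factor_solvency on
    # "security_code".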
@staticmethod
def BookLev(tp_solvency, factor_solvency, dependencies=['total_non_current_liability', 'total_assets']):
"""
:name: 账面杠杆
:desc:非流动负债合计/股东权益合计(含少数股东权益)(MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['BookLev'] = np.where(
CalcTools.is_zero(management.total_assets.values), 0,
management.total_non_current_liability.values / management.total_assets.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, on="security_code")
return factor_solvency
@staticmethod
def CurrentRatio(tp_solvency, factor_solvency, dependencies=['total_current_assets', 'total_current_liability']):
"""
:name: 流动比率
:desc: 流动资产合计/流动负债合计(MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['CurrentRatio'] = np.where(
CalcTools.is_zero(management.total_current_liability.values), 0,
management.total_current_assets.values / management.total_current_liability.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, on="security_code")
return factor_solvency
@staticmethod
def DA(tp_solvency, factor_solvency, dependencies=['total_liability', 'total_assets']):
"""
:name: 债务总资产比
:desc:负债合计MRQ/资产总计MRQ
"""
contrarian = tp_solvency.loc[:, dependencies]
contrarian['DA'] = np.where(
CalcTools.is_zero(contrarian['total_assets']), 0,
contrarian['total_liability'] / contrarian['total_assets'])
contrarian = contrarian.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, contrarian, on="security_code")
return factor_solvency
@staticmethod
def DTE(tp_solvency, factor_solvency,
dependencies=['total_liability', 'total_current_liability', 'fixed_assets']):
"""
:name:有形净值债务率
:desc:负债合计/有形净值(MRQ)
"""
contrarian = tp_solvency.loc[:, dependencies]
contrarian['DTE'] = np.where(
CalcTools.is_zero(contrarian['total_current_liability'] + contrarian['fixed_assets']), 0,
contrarian['total_current_liability'] / (contrarian['total_current_liability'] + contrarian['fixed_assets'])
)
contrarian = contrarian.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, contrarian, on="security_code")
return factor_solvency
@staticmethod
def EquityRatio(tp_solvency, factor_solvency,
dependencies=['total_liability', 'equities_parent_company_owners']):
"""
:name:权益比率
:desc:负债合计/归属母公司股东的权益(MRQ)
"""
management = tp_solvency.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] is not None and x[1] != 0 else None
management['EquityRatio'] = management.apply(func, axis=1)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(management, factor_solvency, how='outer', on='security_code')
return factor_solvency
@staticmethod
def EquityPCToIBDebt(tp_solvency, factor_solvency, dependencies=['equities_parent_company_owners',
'shortterm_loan',
'non_current_liability_in_one_year',
'longterm_loan',
'bonds_payable',
'interest_payable']):
"""
:name:归属母公司股东的权益/带息负债
:desc:归属母公司股东的权益/带息负债(补充 带息负债 = 短期借款+一年内到期的长期负债+长期借款+应付债券+应付利息)
"""
management = tp_solvency.loc[:, dependencies]
management["debt"] = (management.shortterm_loan +
management.non_current_liability_in_one_year +
management.longterm_loan +
management.bonds_payable +
management.interest_payable)
management['EquityPCToIBDebt'] = np.where(
CalcTools.is_zero(management.debt.values), 0,
management.equities_parent_company_owners.values / management.debt.values)
dependencies = dependencies + ['debt']
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
@staticmethod
def EquityPCToTCap(tp_solvency, factor_solvency, dependencies=['equities_parent_company_owners',
'total_owner_equities',
'shortterm_loan',
'non_current_liability_in_one_year',
'longterm_loan', 'bonds_payable',
'interest_payable']):
"""
:name:归属母公司股东的权益/全部投入资本 (补充 全部投入资本=所有者权益合计+带息债务)
:desc: 归属母公司股东的权益/全部投入资本 (补充 全部投入资本=所有者权益合计+带息债务)
"""
management = tp_solvency.loc[:, dependencies]
management["tc"] = (management.total_owner_equities
+ management.shortterm_loan
+ management.non_current_liability_in_one_year
+ management.longterm_loan
+ management.bonds_payable
+ management.interest_payable)
management['EquityPCToTCap'] = np.where(
CalcTools.is_zero(management.tc.values), 0,
management.equities_parent_company_owners.values / management.tc.values)
dependencies = dependencies + ['tc']
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
# InteBearDebtToTotalCapital = 有息负债/总资本 总资本=固定资产+净运营资本 净运营资本=流动资产-流动负债
# InteBearDebtToTotalCapital = 有息负债/(固定资产 + 流动资产 - 流动负债)
@staticmethod
def IntBDToCap(tp_solvency, factor_solvency, dependencies=['shortterm_loan',
'non_current_liability_in_one_year',
'longterm_loan',
'bonds_payable',
'interest_payable',
'fixed_assets',
'total_current_assets',
'total_current_liability']):
"""
:name:带息负债/全部投入资本
:desc:带息债务/全部投入资本*100%(MRQ)
"""
contrarian = tp_solvency.loc[:, dependencies]
contrarian['interest_bearing_liability'] = contrarian['shortterm_loan'] + \
contrarian['non_current_liability_in_one_year'] + \
contrarian['longterm_loan'] + \
contrarian['bonds_payable'] + contrarian['interest_payable']
contrarian['IntBDToCap'] = np.where(
CalcTools.is_zero(contrarian['fixed_assets'] + contrarian['total_current_assets'] + \
contrarian['total_current_liability']), 0,
contrarian['interest_bearing_liability'] / (contrarian['fixed_assets'] + contrarian['total_current_assets']
+ contrarian['total_current_liability'])
)
dependencies = dependencies + ['interest_bearing_liability']
contrarian = contrarian.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, contrarian, how='outer', on="security_code")
return factor_solvency
@staticmethod
    def LDebtToWCap(tp_solvency, factor_solvency, dependencies=['total_current_assets',
                                                                'total_current_liability',
                                                                'total_non_current_liability']):
"""
:name:长期负债与营运资金比率
:desc:非流动负债合计/(流动资产合计-流动负债合计)
"""
management = tp_solvency.loc[:, dependencies]
management['LDebtToWCap'] = np.where(
CalcTools.is_zero(management.total_current_assets.values - management.total_current_liability.values), 0,
            management.total_non_current_liability.values
/ (management.total_current_assets.values - management.total_current_liability.values))
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
@staticmethod
def MktLev(tp_solvency, factor_solvency, dependencies=['total_non_current_liability', 'market_cap']):
"""
:name:市场杠杆
        :desc:非流动负债合计MRQ/(非流动负债合计MRQ+总市值)
"""
management = tp_solvency.loc[:, dependencies]
management['MktLev'] = np.where(
CalcTools.is_zero(management.market_cap.values), 0,
management.total_non_current_liability.values /
(management.total_non_current_liability.values + management.market_cap.values))
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
@staticmethod
def QuickRatio(tp_solvency, factor_solvency,
dependencies=['total_current_assets', 'total_current_liability', 'inventories']):
"""
:name:速动比率
:desc:(流动资产合计-存货)/流动负债合计(MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['QuickRatio'] = np.where(
CalcTools.is_zero(management.total_current_liability.values), 0,
(management.total_current_assets.values - management.inventories.values)
/ management.total_current_liability.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
@staticmethod
def TNWorthToIBDebt(tp_solvency, factor_solvency, dependencies=['equities_parent_company_owners',
'intangible_assets',
'development_expenditure',
'good_will',
'long_deferred_expense',
'deferred_tax_assets',
'shortterm_loan',
'non_current_liability_in_one_year',
'longterm_loan',
'bonds_payable',
'interest_payable']):
"""
:name:有形净值/带息负债
:desc:有形净值/带息负债(MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['ta'] = (management.equities_parent_company_owners -
management.intangible_assets -
management.development_expenditure -
management.good_will -
management.long_deferred_expense -
management.deferred_tax_assets)
management['ibd'] = (management.shortterm_loan +
management.non_current_liability_in_one_year +
management.longterm_loan +
management.bonds_payable +
management.interest_payable)
management['TNWorthToIBDebt'] = np.where(
CalcTools.is_zero(management.ibd.values), 0,
management.ta.values / management.ibd.values)
dependencies = dependencies + ['ta', 'ibd']
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
@staticmethod
def SupQuickRatio(tp_solvency, factor_solvency, dependencies=['cash_equivalents',
'trading_assets',
'bill_receivable',
'account_receivable',
'other_receivable',
'total_current_liability']):
"""
:name:超速动比率
:desc:(货币资金+交易性金融资资产+应收票据+应收帐款+其他应收款)/流动负债合计(MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['SupQuickRatio'] = np.where(
CalcTools.is_zero(management.total_current_liability.values), 0,
(management.cash_equivalents.values +
management.trading_assets.values +
management.bill_receivable.values +
management.account_receivable.values +
management.other_receivable.values) /
management.total_current_liability.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
@staticmethod
def TNWorthToNDebt(tp_solvency, factor_solvency, dependencies=['equities_parent_company_owners',
'intangible_assets',
'development_expenditure',
'good_will',
'long_deferred_expense',
'deferred_tax_assets',
'shortterm_loan',
'non_current_liability_in_one_year',
'longterm_loan',
'bonds_payable',
'interest_payable',
'cash_equivalents']):
"""
:name:有形净值/净债务
:desc:有形净值/净债务(MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['ta'] = (management.equities_parent_company_owners -
management.intangible_assets -
management.development_expenditure -
management.good_will -
management.long_deferred_expense -
management.deferred_tax_assets)
management['nd'] = (management.shortterm_loan +
management.non_current_liability_in_one_year +
management.longterm_loan +
management.bonds_payable +
management.interest_payable -
management.cash_equivalents)
management['TNWorthToNDebt'] = np.where(
CalcTools.is_zero(management.nd.values), 0,
management.ta.values / management.nd.values)
dependencies = dependencies + ['ta', 'nd']
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
@staticmethod
def OPCToDebt(ttm_solvency, factor_solvency,
dependencies=['net_operate_cash_flow_mrq', 'total_current_liability']):
"""
:name:现金流债务比
:desc:经营活动现金净流量(MRQ)/流动负债(MRQ)*100%
"""
cash_flow = ttm_solvency.loc[:, dependencies]
cash_flow['OPCToDebt'] = np.where(
CalcTools.is_zero(cash_flow.total_current_liability.values), 0,
cash_flow.net_operate_cash_flow_mrq.values / cash_flow.total_current_liability.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, cash_flow, how='outer', on="security_code")
return factor_solvency
@staticmethod
def OptCFToCurrLiability(tp_solvency, factor_solvency, dependencies=['net_operate_cash_flow_mrq',
'total_current_liability']):
"""
:name:经营活动产生的现金流量净额(MRQ)/流动负债(MRQ)
:desc:经营活动产生的现金流量净额(MRQ)/流动负债(MRQ)
"""
cash_flow = tp_solvency.loc[:, dependencies]
cash_flow['OptCFToCurrLiability'] = np.where(
CalcTools.is_zero(cash_flow.total_current_liability.values), 0,
cash_flow.net_operate_cash_flow_mrq.values / cash_flow.total_current_liability.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, cash_flow, how='outer', on="security_code")
return factor_solvency
@staticmethod
def CashRatioTTM(ttm_solvency, factor_solvency, dependencies=['cash_and_equivalents_at_end',
                                                                  'total_current_liability']):
"""
:name:期末现金及现金等价物余额(TTM)/流动负债(TTM)
:desc:期末现金及现金等价物余额(TTM)/流动负债(TTM)
"""
cash_flow = ttm_solvency.loc[:, dependencies]
        cash_flow['CashRatioTTM'] = np.where(CalcTools.is_zero(cash_flow.total_current_liability.values),
                                             0,
                                             cash_flow.cash_and_equivalents_at_end.values / cash_flow.total_current_liability.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, cash_flow, how='outer', on="security_code")
return factor_solvency
@staticmethod
def InterestCovTTM(ttm_solvency, factor_solvency, dependencies=['total_profit',
'financial_expense',
'interest_income']):
"""
:name: 利息保障倍数
:desc:息税前利润/利息费用,息税前利润=利润总额+利息费用,利息费用=利息支出-利息收入
"""
earning = ttm_solvency.loc[:, dependencies]
earning['InterestCovTTM'] = np.where(
CalcTools.is_zero(earning.financial_expense.values - earning.interest_income.values), 0,
(earning.total_profit.values + earning.financial_expense.values - earning.interest_income.values) /
(earning.financial_expense.values - earning.interest_income.values))
earning = earning.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, earning, how='outer', on="security_code")
return factor_solvency
@staticmethod
def OptCFToLiabilityTTM(ttm_solvency, factor_solvency,
dependencies=['net_operate_cash_flow', 'total_liability']):
"""
:name:经营活动净现金流(TTM)/负债(TTM)
:desc:经营活动净现金流(TTM)/负债(TTM)
"""
cash_flow = ttm_solvency.loc[:, dependencies]
cash_flow['OptCFToLiabilityTTM'] = np.where(
CalcTools.is_zero(cash_flow.total_liability.values), 0,
cash_flow.net_operate_cash_flow.values / cash_flow.total_liability.values)
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, cash_flow, how='outer', on="security_code")
return factor_solvency
@staticmethod
def OptCFToIBDTTM(ttm_solvency, factor_solvency, dependencies=['net_operate_cash_flow',
'shortterm_loan',
'non_current_liability_in_one_year_ttm',
'longterm_loan',
'bonds_payable',
'interest_payable'
]):
"""
:name:经营活动净现金流(TTM)/带息负债(TTM)
:desc:经营活动净现金流(TTM)/带息负债(TTM)
"""
cash_flow = ttm_solvency.loc[:, dependencies]
cash_flow['interest_bearing_liability'] = cash_flow['shortterm_loan'] + \
cash_flow['non_current_liability_in_one_year_ttm'] + \
cash_flow['longterm_loan'] + \
cash_flow['bonds_payable'] + cash_flow['interest_payable']
cash_flow['OptCFToIBDTTM'] = np.where(
CalcTools.is_zero(cash_flow.interest_bearing_liability.values), 0,
cash_flow.net_operate_cash_flow.values / cash_flow.interest_bearing_liability.values)
dependencies = dependencies + ['interest_bearing_liability']
cash_flow = cash_flow.drop(dependencies, axis=1)
factor_solvency = | pd.merge(factor_solvency, cash_flow, how='outer', on="security_code") | pandas.merge |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description : This code does basic statistical tests (i.e., student t-test, fold change,
              Benjamini-Hochberg false discovery rate adjustment) for the peak table generated
by MZmine-2.53
Copyright : (c) LemasLab, 02/23/2020
Author : <NAME>
License : MIT License
Maintainer : <EMAIL>, <EMAIL>, <EMAIL>
Usage : python add_stats.py -i $input_peak_table
-d $design_file_location
-o $output_peak_table
-l $library_location
"""
import warnings
import logging
import logging.handlers
import pandas as pd
import numpy as np
from statsmodels.stats.multitest import multipletests
from scipy import stats
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s]: %(levelname)s: %(message)s')
warnings.filterwarnings('ignore')
def add_threshold(row, names):
"""Add threshold for blank subtraction algorithm.
# Arguments:
row: certain row of peak table (pandas dataframe).
names: column names in the peak table of a certain group of samples
# Returns:
threshold value
"""
value = np.mean(row[names]) + 3*np.std(row[names])
return value if value > 0 else 5000
def blank_subtraction_flag(row, name_group, name_threshold, bar):
"""Blank subtraction function.
Blank subtraction algorithm:
- Calculate mean (mean_blank) and standard deviation (sd_blank)
      of peak intensities in blank samples.
- Threshold ← mean_blank+3*sd_blank
- If threshold <=0, then replace it with 5,000 (why 5,000?)
    - Calculate mean peak intensity in fat (mean_fat), whole (mean_whole)
and skim (mean_skim) samples.
- ratio_fat ← (mean_fat-threshold)/threshold;
ratio_whole ← (mean_whole-threshold)/threshold;
ratio_skim ← (mean_skim-threshold)/threshold
- If ratio_fat<self_defined_number (e.g. 100) and
ratio_whole<self_defined_number and ratio_skim<self_defined_number,
then drop the peak.
# Arguments:
row: certain row of peak table (pandas dataframe).
name_group: name of the group.
name_threshold: name of the threshold column.
bar: bar value of blank subtraction algorithm.
# Returns:
If a certain peak of this group still exist after blank subtraction
"""
return (np.mean(row[name_group]) - row[name_threshold])/row[name_threshold] > bar
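# A hedged, self-contained illustration of the rule above (all intensities are
# made up); the helper is defined for documentation only and never called.
def _example_blank_subtraction(bar=100):
    row = pd.Series({"blank_1": 100.0, "blank_2": 120.0, "blank_3": 110.0,
                     "fat_1": 55000.0, "fat_2": 65000.0})
    # threshold = mean(blanks) + 3*std(blanks), roughly 134.5 here
    row["threshold"] = add_threshold(row, ["blank_1", "blank_2", "blank_3"])
    # (mean(fat) - threshold) / threshold is roughly 445 > bar, so the peak is kept
    return blank_subtraction_flag(row, ["fat_1", "fat_2"], "threshold", bar)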
# Judge whether certain peak intensity of a sample is 0 or not
def zero_intensity_flag(row, name_group):
"""Check if the mean intensity of certain group of samples is zero. If zero, then
the metabolite is not existed in that material.
# Arguments:
row: certain row of peak table (pandas dataframe).
name_group: name of the group.
# Returns:
True (the mean intensity is zero) or False (the mean intensity is not zero).
"""
return np.mean(row[name_group]) <= 0
# Add p-value for student t-test between two groups of samples
def add_pvalue(row, left_names, right_names):
"""Add p value for two group comparison based on student t-test.
# Arguments:
row: certain row of peak table (pandas dataframe).
left_names: column names in the peak table of the first group of samples.
right_names: column names in the peak table of the second group of samples.
# Returns:
p value of student t-test
"""
_, p = stats.ttest_ind(row[left_names], row[right_names])
return p
# Add t-value for student t-test between two groups of samples
def add_tvalue(row, left_names, right_names):
"""Add t value for two group comparison based on student t-test.
# Arguments:
row: certain row of peak table (pandas dataframe).
left_names: column names in the peak table of the first group of samples.
right_names: column names in the peak table of the second group of samples.
# Returns:
t value of student t-test
"""
t, _ = stats.ttest_ind(row[left_names], row[right_names])
return t
# Add fold-change for the mean values of two groups of samples
def fold_change(row, left, right):
"""Add fold change value for two group comparison.
# Arguments:
row: certain row of peak table (pandas dataframe).
left: column name in the peak table of the mean intensity of first group of samples.
right: column name in the peak table of the mean intensity of second group of samples.
# Returns:
fold change value.
"""
if row[right] == 0:
return np.inf
if row[left] == 0:
return -np.inf
result = row[left]/row[right]
return result if result >= 1 else -1/result
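# A small hedged example of the signed convention above (values are made up);
# defined for illustration only and never called by the pipeline.
def _example_fold_change():
    up = pd.Series({"mean_a": 200.0, "mean_b": 100.0})
    down = pd.Series({"mean_a": 100.0, "mean_b": 200.0})
    # returns (2.0, -2.0): ratios below 1 are reported as negative reciprocals
    return fold_change(up, "mean_a", "mean_b"), fold_change(down, "mean_a", "mean_b")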
# Absolute value of fold-change
def abs_fold_change(row, fold_change_column):
"""Add absolute fold change value for two group comparison.
# Arguments:
row: certain row of peak table (pandas dataframe).
fold_change_column: column name in the peak table of the fold change value.
# Returns:
absolute fold change value.
"""
return abs(row[fold_change_column])
# Add ppm value for identified metabolites.
## The library search result produced by MZmine may exceed 5 ppm,
## so those beyond 5 ppm should be filtered out
def add_ppm(row, library_df):
"""Add part per million (ppm) value for library matching. The library matching done by
MZmine may not follow the threshold strictly (i.e., when setting the ppm to 5, some
metabolites with ppm of more than 5 may also appear in the peak table).
# Arguments:
row: certain row of peak table (pandas dataframe).
library_df: library dataframe.
# Returns:
ppm value of the matched metabolite in the row.
"""
if pd.isnull(row['row identity (main ID)']):
return None
mzs = list(library_df[library_df.Name.str.strip() == row['row identity (main ID)']]['M/Z'])
mz_observe = row["row m/z"]
diff = []
for mz in mzs:
diff.append(abs(mz_observe - mz))
if len(diff) == 0:
return None
mz_theoretical = mzs[diff.index(min(diff))]
return abs((mz_observe-mz_theoretical)*10e5/mz_theoretical)
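# Worked example of the ppm arithmetic above (masses are illustrative): an
# observed m/z of 180.0634 against a theoretical 180.0625 gives
# |180.0634 - 180.0625| * 1e6 / 180.0625 ≈ 5.0 ppm, right at the usual cutoff.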
def add_label(row, group1_name, group2_name):
"""Add label for metabolite represented by the row.
Format: "m_z/retention_time/fold_change".
# Arguments:
row: certain row of peak table (pandas dataframe).
group1_name: name of the group of first group of samples.
group2_name: name of the group of second group of samples.
# Returns:
label (string type).
"""
if | pd.isnull(row["row identity (main ID)"]) | pandas.isnull |
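# Hedged example of the label format described in the docstring above (values
# are illustrative): a peak at m/z 180.0634 with retention time 5.32 and fold
# change 2.5 would be labeled "180.0634/5.32/2.5".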
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_compare_timedelta_series(self):
        # regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
        periods = [Timedelta('0 days 01:00:00')]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
qualify donor data
"""
# %% REQUIRED LIBRARIES
import os
import argparse
import json
import ast
import pandas as pd
import datetime as dt
import numpy as np
# %% USER INPUTS (choices to be made in order to run the code)
codeDescription = "qualify donor data"
parser = argparse.ArgumentParser(description=codeDescription)
parser.add_argument(
"-d",
"--date-stamp",
dest="date_stamp",
default=dt.datetime.now().strftime("%Y-%m-%d"),
help="date, in '%Y-%m-%d' format, of the date when " +
"donors were accepted"
)
parser.add_argument(
"-u",
"--userid",
dest="userid",
default=np.nan,
help="userid of account shared with the donor group or master account"
)
parser.add_argument(
"-o",
"--output-data-path",
dest="data_path",
default=os.path.abspath(
os.path.join(
os.path.dirname(__file__), "..", "data"
)
),
help="the output path where the data is stored"
)
parser.add_argument("-q",
"--qualification-criteria",
dest="qualificationCriteria",
default=os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"tidepool-qualification-criteria.json")
),
type=argparse.FileType('r'),
help="JSON file to be processed, see " +
"tidepool-qualification-critier.json " +
"for a list of required fields")
parser.add_argument(
"-s",
"--save-dayStats",
dest="save_dayStats",
default="False",
help="save the day stats used for qualifying (True/False)"
)
args = parser.parse_args()
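# Example invocation (hypothetical script name, paths and ids, shown only to
# illustrate the arguments defined above):
#   python qualify_donor_data.py -d 2020-01-15 -u some-userid \
#       -o ../data -q tidepool-qualification-criteria.json -s False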
# %% FUNCTIONS
def defineStartAndEndIndex(args, nDonors):
startIndex = int(args.startIndex)
endIndex = int(args.endIndex)
if endIndex == -1:
if startIndex == 0:
endIndex = nDonors
else:
endIndex = startIndex + 1
if endIndex == -2:
endIndex = nDonors
return startIndex, endIndex
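# e.g. with nDonors=100 (hypothetical): (startIndex=0, endIndex=-1) -> (0, 100),
# i.e. all donors; (startIndex=42, endIndex=-1) -> (42, 43), a single donor;
# (startIndex=42, endIndex=-2) -> (42, 100), donor 42 through the end.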
def removeNegativeDurations(df):
if "duration" in list(df):
nNegativeDurations = sum(df.duration.astype(float) < 0)
if nNegativeDurations > 0:
df = df[~(df.duration.astype(float) < 0)]
else:
nNegativeDurations = np.nan
return df, nNegativeDurations
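# e.g. a frame with durations [300, -5, 120] (hypothetical) keeps the 300 and 120
# rows and reports nNegativeDurations == 1; frames without a "duration" column are
# returned unchanged with nNegativeDurations == np.nan.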
def add_uploadDateTime(df):
if "upload" in data.type.unique():
uploadTimes = pd.DataFrame(
df[df.type == "upload"].groupby("uploadId").time.describe()["top"]
)
else:
uploadTimes = pd.DataFrame(columns=["top"])
# if an upload does not have an upload date, then add one
# NOTE: this is a new fix introduced with healthkit data...we now have
# data that does not have an upload record
unique_uploadIds = set(df["uploadId"].unique())
unique_uploadRecords = set(
df.loc[df["type"] == "upload", "uploadId"].unique()
)
uploadIds_missing_uploadRecords = unique_uploadIds - unique_uploadRecords
for upId in uploadIds_missing_uploadRecords:
last_upload_time = df.loc[df["uploadId"] == upId, "time"].max()
uploadTimes.loc[upId, "top"] = last_upload_time
uploadTimes.reset_index(inplace=True)
uploadTimes.rename(
columns={
"top": "uploadTime",
"index": "uploadId"
},
inplace=True
)
df = pd.merge(df, uploadTimes, how='left', on='uploadId')
df["uploadTime"] = pd.to_datetime(df["uploadTime"])
return df
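# After this call every row carries an "uploadTime" column: the most frequent
# "time" of its uploadId's upload record or, for uploadIds with no upload record
# (e.g. healthkit data), the latest "time" observed for that uploadId.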
def filterAndSort(groupedDF, filterByField, sortByField):
filterDF = groupedDF.get_group(filterByField).dropna(axis=1, how="all")
filterDF = filterDF.sort_values(sortByField)
return filterDF
def getClosedLoopDays(groupedData, qualCriteria, metadata):
# filter by basal data and sort by time
if "basal" in groupedData.type.unique():
basalData = filterAndSort(groupedData, "basal", "time")
# get closed loop days
nTB = qualCriteria["nTempBasalsPerDayIsClosedLoop"]
tbDataFrame = basalData.loc[basalData.deliveryType == "temp", ["time"]]
tbDataFrame.index = pd.to_datetime(tbDataFrame["time"])
tbDataFrame = tbDataFrame.drop(["time"], axis=1)
tbDataFrame["basal.temp.count"] = 1
nTempBasalsPerDay = tbDataFrame.resample("D").sum()
closedLoopDF = pd.DataFrame(nTempBasalsPerDay,
index=nTempBasalsPerDay.index.date)
closedLoopDF["date"] = nTempBasalsPerDay.index.date
closedLoopDF["basal.closedLoopDays"] = \
closedLoopDF["basal.temp.count"] >= nTB
nClosedLoopDays = closedLoopDF["basal.closedLoopDays"].sum()
# get the number of days with 670g
basalData["date"] = | pd.to_datetime(basalData.time) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 14:10:00 2020
@author: asweet
"""
import pandas as pd
import numpy as np
from sqlalchemy.types import Integer, Numeric, String, DateTime
from sqlalchemy import create_engine
import urllib.parse
from sys import platform
from abc import ABC, abstractmethod
import time
def get_connection_string(driver, server, database, user_name = 'username', password = 'password', dialect = 'mssql'):
    driver_str = driver.replace(' ', '+')
    # connection_string docs https://docs.sqlalchemy.org/en/13/core/engines.html
    return '%s+pyodbc://%s:%s@%s/%s?driver=%s'%(dialect, user_name, urllib.parse.quote_plus(password), server, database, driver_str)
def get_engine(driver, server, database, dialect, fast_executemany = True):
    connection_string = get_connection_string(driver, server, database, dialect = dialect)
return create_engine(connection_string, fast_executemany = fast_executemany)
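# Example of the resulting SQLAlchemy URL (illustrative values only; the actual
# driver/server/database names are supplied by the caller):
#   mssql+pyodbc://username:p%40ssword@my-server/my-db?driver=ODBC+Driver+17+for+SQL+Server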
def does_output_table_exist(driver, output_server, output_database, output_table, dialect):
engine = get_engine(driver, output_server, output_database, dialect)
return engine.dialect.has_table(engine.connect(), output_table)
def create_output_table(sql, driver, output_server, output_database, dialect):
engine = get_engine(driver, output_server, output_database, dialect)
with engine.begin() as conn:
conn.execute(sql)
class Process(ABC):
default_schema = 'dbo'
def __init__(self, output_meta, out_dtypes, verbose, dialect = 'mssql', sql_driver = 'sql_driver', use_backend = True):
""" Base init """
self.dialect = dialect
self.driver = sql_driver
self.use_backend = use_backend
self.verbose = verbose
self.output_server = output_meta['server']
self.output_database = output_meta['database']
self.output_schema = output_meta['schema']
self.output_table = output_meta['table']
self.out_dtypes = out_dtypes
self.output_table_full = '.'.join([self.output_database, self.output_schema, self.output_table])
if use_backend:
if self.does_output_table_exist() == False:
try:
self.create_output_table()
except Exception as e:
print('failed to create output table with exception: {}'.format(e))
self.data = None
self.has_data = False
self.exceptions = []
def does_output_table_exist(self):
engine = get_engine(self.driver, self.output_server, self.output_database, self.dialect)
return engine.dialect.has_table(engine.connect(), self.output_table)
def create_output_table(self):
if self.verbose:
print('creating output table: {}'.format(self.output_table_full))
engine = get_engine(self.driver, self.output_server, self.output_database, self.dialect)
with engine.begin() as conn:
conn.execute(self.create_sql.format(self.output_database))
def get_truncate_statement(self):
sql = 'TRUNCATE TABLE {}.{}.{};'.format(self.output_database, self.output_schema, self.output_table)
return sql
def push_to_sql(self):
out_cols = list(self.out_dtypes.keys())
engine = get_engine(self.driver, self.output_server, self.output_database, self.dialect)
with engine.begin() as conn:
# truncate table
conn.execute(self.get_truncate_statement())
# push updated data
self.data[out_cols].to_sql(self.output_table, con = conn, index = False, if_exists = 'append', dtype = self.out_dtypes)
@abstractmethod
def get_data(self):
""" get data"""
def process(self, push_to_sql = True):
if self.use_backend == False:
push_to_sql = False
start_time = time.time()
if self.verbose:
print('getting data')
self.get_data()
if self.verbose:
print('finished getting data after {} seconds'.format(time.time() - start_time))
if self.has_data:
if push_to_sql:
start_time = time.time()
if self.verbose:
print('pushing to sql')
try:
self.push_to_sql()
except Exception as e:
print('failed to push to sql with exception: {}'.format(e))
if self.verbose:
print('finished pushing to sql after {} seconds'.format(time.time() - start_time))
else:
return self.data
else:
print('no data found')
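# Minimal usage sketch (illustrative only; assumes one of the subclasses defined
# below and, for SQL pushes, a configured backend). With use_backend=False,
# process() skips table creation and simply returns the scraped DataFrame:
#
#   jhu = COVID_19_JHU(verbose=True, use_backend=False)
#   df = jhu.process()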
class COVID_19_JHU(Process):
create_sql = (
"""
-- create query
"""
)
out_dtypes = {
'FIPS': Integer(),
'Admin2': String(50),
'Province_State': String(50),
'Country_Region': String(50),
'Combined_Key': String(128),
'Lat': Numeric(18, 7),
'Long': Numeric(18, 7),
'Confirmed': Integer(),
'Deaths': Integer(),
'Recovered': Integer(),
'Active': Integer(),
'Date': DateTime(),
'Last_Update': DateTime(),
}
output_meta = {
'server': 'server',
'database': 'database',
'schema': 'schema',
'table': 'covid_19_jhu',
}
def __init__(self, verbose = False, use_backend = True):
super().__init__(self.output_meta, self.out_dtypes, verbose, use_backend = use_backend)
def get_data(self):
try:
# source: https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports
base_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
            start_date = '2020-01-22' # first date of data in the JHU CSSE daily reports
end_date = pd.Timestamp.today()
date_idx = pd.date_range(start = start_date, end = end_date, freq = 'D')
df_list = []
for date in date_idx:
try:
csv_file = date.strftime('%m-%d-%Y') + '.csv'
this_csv = base_url + csv_file
df = pd.read_csv(this_csv)
df['Date'] = date
df_list.append(df)
except Exception as e:
print('{} not found'.format(csv_file))
self.exceptions.append(e)
col_names = []
for df in df_list:
col_names = col_names + list(df)
col_names_mapping = {
'Province/State': 'Province_State',
'Country/Region': 'Country_Region',
'Last Update': 'Last_Update',
'Latitude': 'Lat',
'Longitude': 'Long',
'Long_': 'Long'
}
updated_df_list = []
for df in df_list:
for old_name, new_name in col_names_mapping.items():
if old_name in df.columns:
df = df.rename({old_name: new_name}, axis = 1)
if new_name == 'Last_Update':
df[new_name] = pd.to_datetime(df[new_name])
updated_df_list.append(df)
            # concatenate all daily frames at once (DataFrame.append is deprecated in newer pandas)
            df_append = pd.concat(updated_df_list, sort = False, ignore_index = True)
# some cleaning steps
df_append['Last_Update'] = pd.to_datetime(df_append['Last_Update'])
country_region_mappings = {
'Republic of Korea': 'Korea, South',
'Iran (Islamic Republic of)': 'Iran',
'Mainland China': 'China'
}
for old_val, new_val in country_region_mappings.items():
if old_val in df_append['Country_Region'].unique():
df_append.loc[df_append['Country_Region'] == old_val, 'Country_Region'] = new_val
int_cols = ['Confirmed', 'Deaths', 'Recovered', 'Active', 'FIPS']
for col in int_cols:
if col in df_append.columns:
df_append[col] = df_append[col].astype('Int64') # nullable integer type
self.data = df_append
self.has_data = True
except Exception as e:
print('failed with exception {}'.format(e))
class COVID_19_SFC(Process):
create_sql = (
"""
-- create query
"""
)
out_dtypes = {
'geography': String(50),
'bay_area': String(4),
'cases': Integer(),
'deaths': Integer(),
'running_total_of_cases': Integer(),
'running_total_of_deaths': Integer(),
'date': DateTime(),
}
output_meta = {
'server': 'server',
'database': 'database',
'schema': 'schema',
'table': 'covid_19_sfc',
}
def __init__(self, verbose = False, use_backend = True):
super().__init__(self.output_meta, self.out_dtypes, verbose, use_backend = use_backend)
@staticmethod
def fetch_sfc_json():
json_url = 'https://sfc-project-files.s3.amazonaws.com/project-feeds/covid19_us_cases_ca_by_county_.json'
return pd.read_json(json_url)
def get_data(self):
try:
df_in = self.fetch_sfc_json()
df_in = df_in.drop(['TOTALS', 'ROW'], axis = 1)
df_unpivot = df_in.melt(id_vars = ['GEOGRAPHY', 'BAY AREA', 'CATEGORY'])
df_unpivot = df_unpivot.rename({'variable': 'Date'}, axis = 1)
df_pivot_table = pd.pivot_table(df_unpivot.fillna('_'), values = 'value', index = ['GEOGRAPHY', 'BAY AREA', 'Date'],
columns = ['CATEGORY'], aggfunc = np.max).reset_index()
df_pivot_table['deaths'] = df_pivot_table['deaths'].astype(str).replace('', '0').astype(int)
df_pivot_table['cases'] = df_pivot_table['cases'].astype(str).replace('', '0').astype(int)
df_pivot_table['BAY AREA'] = df_pivot_table['BAY AREA'].replace('', np.nan)
df_cumsum = df_pivot_table.groupby(by = ['GEOGRAPHY', 'Date']).sum().groupby(level = [0]).cumsum().reset_index()
df_cumsum = df_cumsum.rename({'cases': 'running_total_of_cases', 'deaths': 'running_total_of_deaths'}, axis = 1)
df_output = df_pivot_table.merge(df_cumsum, on = ['GEOGRAPHY', 'Date'])
df_output['Date'] = pd.to_datetime(df_output['Date'])
df_output.columns = [col.lower().replace(' ', '_') for col in df_output.columns]
self.data = df_output
self.has_data = True
except Exception as e:
print('failed with exception {}'.format(e))
class COVID_19_NYT(Process):
create_sql = (
"""
-- create query
"""
)
out_dtypes = {
'fips': String(5),
'county': String(50),
'state': String(50),
'cases': Integer(),
'deaths': Integer(),
'date': DateTime(),
}
output_meta = {
'server': 'server',
'database': 'database',
'schema': 'schema',
'table': 'covid_19_nyt',
}
def __init__(self, verbose = False, use_backend = True):
super().__init__(self.output_meta, self.out_dtypes, verbose, use_backend)
def get_data(self):
try:
raw_url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
df_nyt = pd.read_csv(raw_url, dtype = {'fips': 'str'})
df_nyt['date'] = pd.to_datetime(df_nyt['date'])
df_nyt['cases'] = df_nyt['cases'].astype('Int64')
df_nyt['deaths'] = df_nyt['deaths'].astype('Int64')
self.data = df_nyt
self.has_data = True
except Exception as e:
print('failed with exception {}'.format(e))
class COVID_19_JHU_US(Process):
create_sql = (
"""
-- create sql
"""
)
out_dtypes = {
'uid': String(8),
'iso2': String(2),
'iso3': String(3),
'code3': String(3),
'fips': String(5),
'admin2': String(64),
'province_state': String(50),
'country_region': String(4),
'lat': Numeric(18, 7),
'long': Numeric(18, 7),
'combined_key': String(64),
'date': DateTime(),
'confirmed': Integer(),
'deaths': Integer(),
}
output_meta = {
'server': 'server',
'database': 'database',
'schema': 'schema',
'table': 'covid_19_jhu_us',
}
def __init__(self, verbose = False, use_backend = True):
super().__init__(self.output_meta, self.out_dtypes, verbose, use_backend)
def get_data(self):
try:
url_confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv'
            df_jhu_confirmed = pd.read_csv(url_confirmed)
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
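# For reference, a sketch of the JSON shapes that motivate the adjustments above
# (hypothetical two-row frame with columns "x"/"y"):
#   orient="records" -> '[{"x":"a","y":"b"},{"x":"c","y":"d"}]'  (index lost, so reset it)
#   orient="values"  -> '[["a","b"],["c","d"]]'                  (labels lost, so columns become a RangeIndex)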
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self) -> str:
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
result = df_nonprintable.to_json(default_handler=str)
expected = f'{{"A":{{"0":"{hexed}"}}}}'
assert result == expected
assert (
df_mixed.to_json(default_handler=str)
== f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
tm.assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy, string_series):
data = string_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = string_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype, object_series):
data = object_series.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = object_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy, empty_series):
data = empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = empty_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series):
data = datetime_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = datetime_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, np.int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
data = s.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = s.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self, datetime_series):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = datetime_series.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
def test_convert_dates(self, datetime_series):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=datetime_series.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit, datetime_series):
ts = Series(Timestamp(date), index=datetime_series.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self, datetime_series):
ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
tm.assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
tm.assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
tm.assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
tm.assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@tm.network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@tm.network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit="ms")
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == "timedelta64[ns]"
tm.assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter))
frame = DataFrame(
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
"c": pd.date_range(start="20130101", periods=2),
}
)
result = pd.read_json(frame.to_json(date_unit="ns"))
result["a"] = pd.to_timedelta(result.a, unit="ns")
result["c"] = pd.to_datetime(result.c)
tm.assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame(
{"a": [timedelta(23), | pd.Timestamp("20130101") | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 08:06:31 2021
@author: bcamc
"""
#%% Import Packages
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition, inset_axes
from matplotlib.lines import Line2D
import pandas as pd
import numpy as np
import scipy
from scipy.stats.stats import pearsonr, spearmanr
import cartopy
import cartopy.crs as ccrs
from mpl_toolkits.axes_grid1 import make_axes_locatable
import xarray as xr
from sklearn.decomposition import IncrementalPCA
import os
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from datetime import datetime
from sklearn import linear_model
import datetime
import cartopy.io.shapereader as shpreader
from cartopy.feature import ShapelyFeature
import shapely.geometry as sgeom
from shapely.ops import unary_union
from shapely.prepared import prep
import joblib
from joblib import Parallel, delayed
from obspy.geodetics import kilometers2degrees, degrees2kilometers
import cmocean
import seaborn as sns
from tabulate import tabulate
# Progress bar package
from tqdm import tqdm
# Gibbs seawater properties packages
import gsw
# Import pre-built mapping functions
from SO_mapping_templates import haversine, South_1ax_map, South_1ax_flat_map
# Import function to calculate fluxes
from Fluxes import calculate_fluxes
# Import taylor diagram script
from taylorDiagram import TaylorDiagram
#%% Define directories
front_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/'
lana_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/dmsclimatology/'
jarnikova_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/Jarnikova_SO_files/'
#%% Set working directories
dir_ = 'C:\\Users\\bcamc\\OneDrive\\Desktop\\Python\\Projects\\sulfur\\southern_ocean\\Scripts'
if os.getcwd() != dir_:
os.chdir(dir_)
#%% Read in data (optional)
export_dir = 'C:/Users/bcamc/OneDrive/Desktop/Python/Projects/sulfur/southern_ocean/export_data/'
models_combined = pd.read_csv(export_dir+'models_combined.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
X_full_plus = pd.read_csv(export_dir+'X_full_plus.csv').set_index(['datetime','latbins','lonbins'])
# ANN_y_pred = pd.read_csv(export_dir+'ANN_y_pred.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
# RFR_y_pred = pd.read_csv(export_dir+'RFR_y_pred.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
# y = pd.read_csv(export_dir+'y.csv').set_index(['datetime','latbins','lonbins']).squeeze('columns')
# X = pd.read_csv(export_dir+'X.csv').set_index(['datetime','latbins','lonbins'])
# X_full = X_full_plus.drop(['dSSHA','currents','SRD'],axis=1)
#%% Post-processing
# ***** Load in models/data using "SO_DMS_build_models.py" *****
# for plotting
reordered_months = np.array([10.,11.,12.,1.,2.,3.,4.])
# Average predictions
RFR_y_pred_mean = np.sinh(RFR_y_pred).groupby(['latbins','lonbins']).mean()
ANN_y_pred_mean = np.sinh(ANN_y_pred).groupby(['latbins','lonbins']).mean()
# calculate Si*
Si_star = (X_full.loc[:,'Si']-X_full.loc[:,'SSN']).squeeze()
X_full_plus['Si_star'] = Si_star
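# Note: Si* (silicate minus nitrate, assuming SSN here is surface nitrate) is a
# standard tracer; negative values indicate waters depleted in silicic acid
# relative to nitrate.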
#------------------------------------------------------------------------------
# Import ACC front locations
front_data = xr.open_dataset(front_dir+'Park_durand_fronts.nc')
fronts = dict()
to_bin = lambda x: np.round(x /grid) * grid
#------------------------------------------------------------------------------
# Build each ACC front (NB, SAF, PF, SACCF, SB) as a binned, deduplicated Series
for key in ['NB', 'SAF', 'PF', 'SACCF', 'SB']:
    fronts[key] = pd.DataFrame(np.stack([front_data['Lat'+key].values,
                                         front_data['Lon'+key].values,
                                         np.ones(front_data['Lon'+key].values.shape)], axis=1),
                               columns=['latbins','lonbins','locs'])
    fronts[key] = fronts[key].sort_values('lonbins')
    fronts[key]['latbins'] = fronts[key]['latbins'].map(to_bin).round(3)
    fronts[key]['lonbins'] = fronts[key]['lonbins'].map(to_bin).round(3)
    fronts[key] = fronts[key].set_index(['latbins','lonbins']).squeeze()
    fronts[key] = fronts[key][~fronts[key].index.duplicated(keep='first')]
    # fronts[key] = fronts[key].reindex_like(models_combined.loc[1])
# front_data.close(); del front_data
#------------------------------------------------------------------------------
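# gsw workflow (as used below): practical salinity -> absolute salinity (SA),
# in-situ temperature -> conservative temperature (CT), then in-situ density at
# p = 1 dbar, stored as a Series aligned to the X_full index.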
SA = gsw.SA_from_SP(SP=X_full.loc[:,'SAL'].values, p=1, lon=X_full.index.get_level_values('lonbins').values, lat=X_full.index.get_level_values('latbins').values)
CT = gsw.CT_from_t(SA=SA, t=X_full.loc[:,'SST'].values, p=1)
density = gsw.density.rho(SA=SA,CT=CT,p=1)
density = pd.Series(density, index=X_full.loc[:,'chl'].index)
#%% Model Sea-Air Fluxes
#-----------------------------------------------------------------------------
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# ===================
# RFR Model
# ===================
#-----------------------------------------------------------------------------
# Fluxes (umol m^-2 d^-1):
RFR_flux = dict()
k_dms, RFR_flux['GM12'] = calculate_fluxes(data=np.sinh(RFR_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='GM12')
_, RFR_flux['SD02'] = calculate_fluxes(data=np.sinh(RFR_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
RFR_flux['GM12'] = pd.Series(RFR_flux['GM12'], index=X_full.loc[:,'SST'].index, name='DMS flux')
# filter out negative estimates
RFR_flux['GM12'] = RFR_flux['GM12'][(RFR_flux['GM12'] >= 0) & (RFR_flux['GM12'].notna())].reindex_like(RFR_y_pred)
RFR_flux['SD02'] = pd.Series(RFR_flux['SD02'], index=X_full.loc[:,'SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
# ===================
# ANN Model
# ===================
#-----------------------------------------------------------------------------
ANN_flux = dict()
_, ANN_flux['GM12'] = calculate_fluxes(data=np.sinh(ANN_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='GM12')
_, ANN_flux['SD02'] = calculate_fluxes(data=np.sinh(ANN_y_pred).values,
ice_cover=X_full.loc[:,'ice'].values,
wind_speed=X_full.loc[:,'wind'].values,
T=X_full.loc[:,'SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
ANN_flux['GM12'] = pd.Series(ANN_flux['GM12'], index=X_full.loc[:,'SST'].index, name='DMS flux')
# filter out negative estimates
ANN_flux['GM12'] = ANN_flux['GM12'][(ANN_flux['GM12'] >= 0) & (ANN_flux['GM12'].notna())].reindex_like(ANN_y_pred)
ANN_flux['SD02'] = pd.Series(ANN_flux['SD02'], index=X_full.loc[:,'SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
# ===================
# Actual
# ===================
#-----------------------------------------------------------------------------
obs_flux = dict()
_, obs_flux['GM12'] = calculate_fluxes(data=np.sinh(y).values,
ice_cover=X.loc[:,'ice'].values,
wind_speed=X.loc[:,'wind'].values,
T=X.loc[:,'SST'].values,
parameterization='GM12')
_, obs_flux['SD02'] = calculate_fluxes(data=np.sinh(y).values,
ice_cover=X.loc[:,'ice'].values,
wind_speed=X.loc[:,'wind'].values,
T=X.loc[:,'SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
obs_flux['GM12'] = pd.Series(obs_flux['GM12'], index=X.loc[:,'SST'].index, name='DMS flux')
# filter out negative estimates
obs_flux['GM12'] = obs_flux['GM12'][(obs_flux['GM12'] >= 0) & (obs_flux['GM12'].notna())].reindex_like(y)
obs_flux['SD02'] = pd.Series(obs_flux['SD02'], index=X.loc[:,'SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
# ===================
# Regional Fluxes
# ===================
#-----------------------------------------------------------------------------
#~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Constants:
A = ((max_lat-min_lat)*111*1000)*((max_lon-min_lon)*111*1000) # total regional area (m^2), approximating 111 km per degree
A_ocean = A*frac_ocean # ocean area (m^2) = total area x fraction covered by ocean
S_mol_mass = 32.06 # molar mass of sulfur
num_days = np.sum(np.array([31,30,31,31,28,31,30])) # total number of days in the dataset (Oct-Apr)
#-----------------------------------------------------------------------------
# Regional modelled flux (convert to Tg over total days)
RFR_flux_reg = (RFR_flux['GM12']*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
ANN_flux_reg = (ANN_flux['GM12']*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
obs_flux_reg = (obs_flux['GM12']*S_mol_mass*A_ocean*num_days)/(1e6*1e12)
fluxes_combined = pd.concat([RFR_flux['GM12'], ANN_flux['GM12']], axis=1).mean(axis=1)
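# Unit check for the regional conversion above (illustrative, derived from the
# constants defined here):
#   [umol S m^-2 d^-1] * S_mol_mass [g mol^-1]  -> ug S m^-2 d^-1
#   * A_ocean [m^2] * num_days [d]              -> ug S over the Oct-Apr period
#   / 1e6 (ug -> g) / 1e12 (g -> Tg)            -> Tg S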
#%% Lana Climatology Sea-air Fluxes
files = os.listdir(lana_dir)
# Set 1x1° coords
lana_coords = dict()
lana_coords['lat'] = pd.Series(np.arange(-89,91,1), name='latbins')
lana_coords['lon'] = pd.Series(np.arange(-179,181,1), name='lonbins')
time_match = {'OCT':10,'NOV':11,'DEC':12,'JAN':1,'FEB':2,'MAR':3,'APR':4}
# Retrieve DMS climatology values, adding lats/lons to the dataframes
lana_clim = []
for file in files:
frame = pd.DataFrame(np.flipud(pd.read_csv(lana_dir+file, header=None)),
index=lana_coords['lat'], columns=lana_coords['lon'])
frame = frame.stack(dropna=False)
frame = frame.reset_index()
frame['datetime'] = np.tile(float(time_match[file.split('.')[0][-3:]]), len(frame))
frame = frame.set_index(['datetime','latbins','lonbins']).squeeze()
frame.name = 'DMS'
lana_clim.append(frame)
lana_clim = pd.concat(lana_clim)
# Regrid variables to compute sea-air fluxes
lana = dict()
for var in ['wind','ice','SST']:
lana[var] = X_full.loc[:,var].copy()
lana[var] = lana[var].reset_index()
lana[var] = lana[var].rename(columns={'lonbins':'lon','latbins':'lat'})
    # regrid to nearest degree (i.e. 1x1° grid)
lana[var]['latbins'] = lana[var].lat.round(0).astype('int32')
lana[var]['lonbins'] = lana[var].lon.round(0).astype('int32')
lana[var] = lana[var].set_index(['datetime','latbins','lonbins'])
lana[var] = lana[var].drop(columns=['lat','lon'])
lana[var] = lana[var].groupby(['datetime','latbins','lonbins']).mean().squeeze()
lana[var] = lana[var].sort_index().reindex_like(lana_clim)
print(var+' regrid complete')
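# For illustration: with the rounding above, a grid point at lat=-62.38,
# lon=145.12 falls into the 1-degree bin (latbins=-62, lonbins=145) and is
# averaged with any other points sharing that bin and datetime.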
# Compute sea-air flux
#-----------------------------------------------------------------------------
lana_flux = dict()
_, lana_flux['GM12'] = calculate_fluxes(data=lana_clim.values,
ice_cover=lana['ice'].values,
wind_speed=lana['wind'].values,
T=lana['SST'].values,
parameterization='GM12')
_, lana_flux['SD02'] = calculate_fluxes(data=lana_clim.values,
ice_cover=lana['ice'].values,
wind_speed=lana['wind'].values,
T=lana['SST'].values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
lana_flux['GM12'] = pd.Series(lana_flux['GM12'], index=lana['SST'].index, name='DMS flux')
# filter out negative estimates
lana_flux['GM12'] = lana_flux['GM12'][(lana_flux['GM12'] >= 0) & (lana_flux['GM12'].notna())].reindex_like(lana_clim)
lana_flux['SD02'] = pd.Series(lana_flux['SD02'], index=lana['SST'].index, name='DMS flux')
#-----------------------------------------------------------------------------
del frame
#%% Jarnikova Climatology Sea-air Fluxes
# This climatology is from Dec to Feb (Jarnikova & Tortell, 2016)
mat = scipy.io.loadmat(jarnikova_dir+'nov26product.mat')
tj_dms = mat['structname'][0,1]['barnessmooth'][0,0]
tj_lats = mat['structname'][0,1]['latvec'][0,0][0,:]
tj_lons = mat['structname'][0,1]['lonvec'][0,0][0,:]
jarnikova_clim = pd.DataFrame(tj_dms, index=tj_lats, columns=tj_lons)
jarnikova_clim.index = jarnikova_clim.index.rename('latbins')
jarnikova_clim.columns = jarnikova_clim.columns.rename('lonbins')
jarnikova_clim = jarnikova_clim.stack()
# Reindex like lana et al. climatology
jarnikova_clim = jarnikova_clim.reindex_like(lana_clim.loc[[12,1,2]].groupby(['latbins','lonbins']).mean())
# Calculate the fluxes
#-----------------------------------------------------------------------------
jarnikova_flux = dict()
_, jarnikova_flux['GM12'] = calculate_fluxes(data=jarnikova_clim,
ice_cover=lana['ice'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean(),
wind_speed=lana['wind'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean(),
T=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean(),
parameterization='GM12')
_, jarnikova_flux['SD02'] = calculate_fluxes(data=jarnikova_clim.values,
ice_cover=lana['ice'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().values,
wind_speed=lana['wind'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().values,
T=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().values,
parameterization='SD02')
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
# Set as series
jarnikova_flux['GM12'] = pd.Series(jarnikova_flux['GM12'], index=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().index, name='DMS flux')
# filter out negative estimates
jarnikova_flux['GM12'] = jarnikova_flux['GM12'][(jarnikova_flux['GM12'] >= 0) & (jarnikova_flux['GM12'].notna())].reindex_like(jarnikova_clim)
jarnikova_flux['SD02'] = pd.Series(jarnikova_flux['SD02'], index=lana['SST'].loc[[12,1,2]].groupby(['latbins','lonbins']).mean().index, name='DMS flux')
#-----------------------------------------------------------------------------
del mat
#%% Compute KDEs for fluxes
def KDE(y):
"""
A modified wrapper function pulled from the pandas source code
(https://github.com/pandas-dev/pandas/blob/0.21.x/pandas/plotting/_core.py#L1381-L1430)
that returns the kernel density estimates of a pandas Series/sliced DataFrame
using scipy's gaussian_kde function. It is efficient like the native pandas
plotting function (the fitted KDE is evaluated at only 1000 points spanning the
distribution), but it returns the actual values instead of an axes handle.
Parameters
----------
y : Series or sliced Dataframe
Input data.
Returns
-------
evals : Series or Dataframe
col1: fitted grid of 1000 points spanning the data range (extended by half the sample range at each end);
col2: kernel density estimates evaluated at each grid point.
"""
from scipy.stats import gaussian_kde
y = y.dropna()
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range, 1000)
kde = gaussian_kde(y.dropna())
vals = kde.evaluate(ind)
evals = pd.concat([ | pd.Series(ind, name='ind') | pandas.Series |
# pylint: disable-msg=E1101,W0613,W0603
import os
import copy
from collections import defaultdict
import numpy as np
import pandas.json as _json
from pandas.tslib import iNaT
from pandas.compat import StringIO, long, u
from pandas import compat, isnull
from pandas import Series, DataFrame, to_datetime
from pandas.io.common import get_filepath_or_buffer, _get_handle
from pandas.core.common import AbstractMethodError
from pandas.formats.printing import pprint_thing
loads = _json.loads
dumps = _json.dumps
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False):
if lines and orient != 'records':
raise ValueError(
"'lines' keyword only valid when 'orient' is records")
if isinstance(obj, Series):
s = SeriesWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
elif isinstance(obj, DataFrame):
s = FrameWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
if lines:
s = _convert_to_line_delimits(s)
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, 'w') as fh:
fh.write(s)
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
return dumps(
self.obj,
orient=self.orient,
double_precision=self.double_precision,
ensure_ascii=self.ensure_ascii,
date_unit=self.date_unit,
iso_dates=self.date_format == 'iso',
default_handler=self.default_handler)
class SeriesWriter(Writer):
_default_orient = 'index'
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'%s'" % self.orient)
class FrameWriter(Writer):
_default_orient = 'columns'
def _format_axes(self):
""" try to axes if they are datelike """
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'%s'." % self.orient)
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'%s'." % self.orient)
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False):
"""
Convert a JSON string to pandas object
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file
could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don't infer dtypes at all, applies only to the data.
convert_axes : boolean, default True
Try to convert the axes to the proper dtypes.
convert_dates : boolean, default True
List of columns to parse for dates; If True, then try to parse
datelike columns. A column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
See Also
--------
DataFrame.to_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
"""
filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf,
encoding=encoding)
if isinstance(filepath_or_buffer, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
# if the filepath is too long, os.path.exists raises here
# (see GH 5874)
except (TypeError, ValueError):
exists = False
if exists:
fh, handles = _get_handle(filepath_or_buffer, 'r',
encoding=encoding)
json = fh.read()
fh.close()
else:
json = filepath_or_buffer
elif hasattr(filepath_or_buffer, 'read'):
json = filepath_or_buffer.read()
else:
json = filepath_or_buffer
if lines:
# If given a json lines file, we break the string into lines, add
# commas and put it in a json list to make a valid json object.
lines = list(StringIO(json.strip()))
json = u'[' + u','.join(lines) + u']'
obj = None
if typ == 'frame':
obj = FrameParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
dtype = dict(data=dtype)
obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
return obj
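# Minimal sketch of the `lines=True` path described in the docstring above
# (illustrative input; each line is one 'records'-style JSON object):
#
#   >>> s = '{"col 1":"a","col 2":"b"}\n{"col 1":"c","col 2":"d"}'
#   >>> read_json(s, orient='records', lines=True)
#     col 1 col 2
#   0     a     b
#   1     c     d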
class Parser(object):
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
's': long(31536000),
'ms': long(31536000000),
'us': long(31536000000000),
'ns': long(31536000000000000)}
def __init__(self, json, orient, dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=False, numpy=False,
precise_float=False, date_unit=None):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of %s' %
(self._STAMP_UNITS,))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"checks that dict has only the appropriate keys for orient='split'"
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(u("JSON data had unexpected key(s): %s") %
pprint_thing(bad_keys))
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
""" try to convert axes """
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
""" try to parse a ndarray like into a column by inferring dtype """
# don't try to coerce, unless a force conversion
if use_dtypes:
if self.dtype is False:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except:
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == 'object':
# try float
try:
data = data.astype('float64')
result = True
except:
pass
if data.dtype.kind == 'f':
if data.dtype != 'float64':
# coerce floats to 64
try:
data = data.astype('float64')
result = True
except:
pass
# don't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
# coerce ints if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
data = new_data
result = True
except:
pass
# coerce ints to 64
if data.dtype == 'int':
# coerce ints to int64
try:
data = data.astype('int64')
result = True
except:
pass
return data, result
def _try_convert_to_date(self, data):
""" try to parse a ndarray like into a date column
try to coerce object in epoch/iso formats and
integer/float in epoch formats, return a boolean if parsing
was successful """
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == 'object':
try:
new_data = data.astype('int64')
except:
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (isnull(new_data.values) | (new_data > self.min_stamp) |
(new_data.values == iNaT))
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors='raise',
unit=date_unit)
except ValueError:
continue
except:
break
return new_data, True
return data, False
def _try_convert_dates(self):
raise AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = 'index'
_split_keys = ('name', 'index', 'data')
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(
loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
else:
self.obj = Series(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
'data', self.obj, convert_dates=self.convert_dates)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
_split_keys = ('columns', 'index', 'data')
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(json, dtype=None, numpy=True, labelled=True,
precise_float=self.precise_float)
if args:
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
else:
self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None).T
else:
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
def _process_converter(self, f, filt=None):
""" take a conversion function and possibly recreate the frame """
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
def _try_convert_types(self):
if self.obj is None:
return
if self.convert_dates:
self._try_convert_dates()
self._process_converter(
lambda col, c: self._try_convert_data(col, c, convert_dates=False))
def _try_convert_dates(self):
if self.obj is None:
return
# our columns to parse
convert_dates = self.convert_dates
if convert_dates is True:
convert_dates = []
convert_dates = set(convert_dates)
def is_ok(col):
""" return if this col is ok to try for a date parse """
if not isinstance(col, compat.string_types):
return False
col_lower = col.lower()
if (col_lower.endswith('_at') or
col_lower.endswith('_time') or
col_lower == 'modified' or
col_lower == 'date' or
col_lower == 'datetime' or
col_lower.startswith('timestamp')):
return True
return False
self._process_converter(
lambda col, c: self._try_convert_to_date(c),
lambda col, c: ((self.keep_default_dates and is_ok(col)) or
col in convert_dates))
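# For illustration, with keep_default_dates the heuristic above treats columns
# named e.g. 'created_at', 'login_time', 'timestamp_ms', 'modified' or 'date'
# as date candidates, while columns such as 'price' or 'dated' are left alone.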
# ---------------------------------------------------------------------
# JSON normalization routines
def _convert_to_line_delimits(s):
"""Helper function that converts json lists to line delimited json."""
# Determine if we have a JSON list to turn into lines; otherwise just return
# the json object. Only lists can be converted.
if not (s[0] == '[' and s[-1] == ']'):
return s
s = s[1:-1]
from pandas.lib import convert_json_to_lines
return convert_json_to_lines(s)
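# For illustration: a serialized list such as '[{"a":1},{"a":2}]' becomes the
# line-delimited form '{"a":1}\n{"a":2}', while any non-list JSON string is
# returned unchanged by the early return above.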
def nested_to_record(ds, prefix="", level=0):
"""a simplified json_normalize
converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
level: the number of levels in the json string, optional, default: 0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, compat.string_types):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + '.' + k
# only dicts get recursively flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, level + 1))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
def json_normalize(data, record_path=None, meta=None,
meta_prefix=None,
record_prefix=None,
errors='raise'):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
record_prefix : string, default None
If not None, prefix records with dotted path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
meta_prefix : string, default None
errors : {'raise', 'ignore'}, default 'raise'
* ignore : will ignore KeyError if keys listed in meta are not
always present
* raise : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': '<NAME>'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': '<NAME>', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': '<NAME>'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> from pandas.io.json import json_normalize
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 <NAME> Florida FL
1 Broward 40000 <NAME> Florida FL
2 <NAME> 60000 <NAME> Florida FL
3 Summit 1234 <NAME> Ohio OH
4 Cuyahoga 1337 <NAME> Ohio OH
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: {b: 1, c: 2}} -> {VeryLong.b: 1, VeryLong.c: 2}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
for i, x in enumerate(meta):
if not isinstance(x, list):
meta[i] = [x]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
meta_keys = ['.'.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:],
seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == 'ignore':
meta_val = np.nan
else:
raise \
KeyError("Try running with "
"errors='ignore' as key "
"%s is not always present", e)
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result.rename(columns=lambda x: record_prefix + x, inplace=True)
# Data types, a problem
for k, v in | compat.iteritems(meta_vals) | pandas.compat.iteritems |
"""
Makes Data Available in Standard Formats. Creates the following data:
df_gene_description DF
cn.GENE_ID (index), cn.GENE_NAME, cn.LENGTH, cn.PRODUCT, cn.START, cn.END, cn.STRAND
Dataframes in dfs_centered_adjusted_read_count
cn.GENE_ID (index), columns: time indices
hypoxia curve DF
cn.SAMPLE, cn.HOURS, 0, 1, 2 (DO values), mean, std, cv
df_normalized
index: cn.GENE_ID
column: time
df_mean, # Mean values of counts
df_std, # Std of count values
df_cv, # Coefficient of variation
df_stage_matrix,
df_gene_expression_state, # Genes expressed in each state
df_go_terms
cn.GENE_ID cn.GO_TERM
df_ec_terms
cn.GENE_ID cpn.KEGG_EC
df_ko_terms
cn.GENE_ID cpn.KEGG_KO
df_kegg_pathways
cpn.KEGG_PATHWAY cpn.DESCRIPTION
df_kegg_gene_pathways
cn.GENE_ID cpn.KEGG_PATHWAY
dfs_read_count - raw read count dataframes
index: cn.GENE_ID
column: time
dfs_adjusted_read_count - readcounts adjusted w.r.t. library size, gene length
index: cn.GENE_ID
column: time
dfs_adjusted_read_count_wrt0 - adjusted w.r.t. libary size, gene length, time 0
index: cn.GENE_ID
column: time
dfs_centered_adjusted_read_count - centers w.r.t. mean value of gene
index: cn.GENE_ID
column: time
"""
import common.constants as cn
import common_python.constants as cpn
from common_python.util.persister import Persister
import os
import pandas as pd
import numpy as np
FILENAME_HYPOXIA = "hypoxia_curve_DO"
FILENAME_NORMALIZED = "normalized_log2_transformed_counts"
FILENAME_READS = "hypoxia_timecourse_reads"
FILENAME_STAGES = "stages_matrix"
FILENAME_GENEDATA = "gene_data"
FILENAME_GENE_EXPRESSION_STATE = "gene_expression_state"
FILENAME_GO_TERMS = "MTB.GO.All.GOterms"
FILENAME_EC_TERMS = "mtb_gene_ec"
FILENAME_KO_TERMS = "mtb_gene_ec"
FILENAME_KEGG_PATHWAYS = "mtb_kegg_pathways"
FILENAME_KEGG_GENE_PATHWAY = "mtb_kegg_gene_pathway"
NUM_REPL = 3
TIME_0 = "T0"
T0 = 0
MIN_LOG2_VALUE = -10
MIN_VALUE = 10e-5
MILLION = 1e6
KILOBASE = 1e3 # Thousand bases
class DataProvider(object):
# Instance variables in the class
instance_variables = [
"df_gene_description",
"df_gene_expression_state",
"df_hypoxia",
"df_mean",
"df_std",
"df_cv",
"df_normalized",
"df_stage_matrix",
"df_go_terms",
"df_ec_terms",
"df_ko_terms",
"df_kegg_pathways",
"df_kegg_gene_pathways",
"dfs_read_count",
"_dfs_adjusted_read_count",
"_dfs_adjusted_read_count_wrt0",
"_dfs_centered_adjusted_read_count",
]
def __init__(self, data_dir=cn.DATA_DIR, is_normalized_wrtT0=True,
is_only_qgenes=True, is_display_errors=True):
"""
:param bool is_normalized_wrtT0: normalize data w.r.t. T0
Otherwise, standardize values using the mean.
:param bool is_only_qgenes: only include genes included in multi-hypothesis test
"""
self._data_dir = data_dir
self._is_normalized_wrtT0 = is_normalized_wrtT0
self._is_only_qgenes = is_only_qgenes
self._is_display_errors = is_display_errors
self._setValues()
def _setValues(self, provider=None):
"""
Sets values for the instance variables.
:param DataProvider provider:
"""
for var in self.__class__.instance_variables:
if provider is None:
stmt = "self.%s = None" % var
else:
stmt = "self.%s = provider.%s" % (var, var)
exec(stmt)
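# A minimal equivalent without exec (sketch): the same assignments can be done
# with attribute access, e.g.
#   value = None if provider is None else getattr(provider, var)
#   setattr(self, var, value)
# which avoids building statements as strings.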
def _makeDFFromCSV(self, filename, is_index_geneid=False):
"""
Processes a CSV file
:param str filename: without csv extension
:param bool is_index_geneid: use cn.GENE_ID to index
:return pd.DataFrame:
"""
path = os.path.join(self._data_dir, "%s.csv" % filename)
df = | pd.read_csv(path) | pandas.read_csv |
import os
import json
import sqlite3 as sql
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from catboost import CatBoostRegressor
from typing import List
import logging
from constants import (
UTILS_PATH, STREETS_FOR_PREDICTIONS_FILE,
CURRENT_FILENAMES_FILE, SELECT_DATETIME_STREETS_QUERY,
METABASE_DATA_PATH, CRASHES_AFTER_ETL, STREET_NAME, DATETIME,
DAY_FORMAT, PREDICTOR_FILE
)
def build_predictions() -> List[str]:
"""
Makes a number-of-crashes prediction for tomorrow for each street in the STREETS_FOR_PREDICTIONS_FILE list.
:return: list, top-10 streets where accidents are more likely to occur tomorrow.
"""
with open(os.path.join(UTILS_PATH, STREETS_FOR_PREDICTIONS_FILE)) as f:
streets_for_predictions = f.read().split('\n')[:-1]
street_to_id = {street: ind for ind, street in enumerate(streets_for_predictions)}
yesterday = datetime.today() - timedelta(days=7) # shift for 7 days
last_month = set([(yesterday - timedelta(days=x)).strftime(DAY_FORMAT) for x in range(30)])
with open(os.path.join(UTILS_PATH, CURRENT_FILENAMES_FILE), "r") as f:
FILENAMES = json.load(f)
filename = FILENAMES[CRASHES_AFTER_ETL]
conn = sql.connect(f"{METABASE_DATA_PATH}{filename}.db")
query = f"{SELECT_DATETIME_STREETS_QUERY} '{filename}'"
df = pd.read_sql_query(query, conn)
df[DATETIME] = df[DATETIME].apply(lambda x: x.split("T")[0])
df = df[df[STREET_NAME].isin(streets_for_predictions)]
df = df.reset_index(drop=True)
df = df[df[DATETIME].isin(last_month)]
df[DATETIME] = | pd.to_datetime(df[DATETIME]) | pandas.to_datetime |
import json
from datetime import datetime
import pandas as pd
from collections import namedtuple
from FinanceTools import *
import numpy as np
import sys
class StatusInvest:
def __init__(self, inFile):
self.orders = pd.read_csv(inFile)
self.orders['Category'] = self.orders['Category'].apply(lambda x: 'Ações' if x == 'Stock' else 'FII\'s')
self.orders['Type'] = self.orders['Type'].apply(lambda x: 'C' if x == 'Compra' else 'V')
self.orders['Date'] = | pd.to_datetime(self.orders['Date']) | pandas.to_datetime |
'''
Importing pandasTools enables several features that allow for using RDKit molecules as columns of a Pandas dataframe.
If the dataframe is containing a molecule format in a column (e.g. smiles), like in this example:
>>> from rdkit.Chem import PandasTools
>>> import pandas as pd
>>> import os
>>> from rdkit import RDConfig
>>> antibiotics = pd.DataFrame(columns=['Name','Smiles'])
>>> antibiotics = antibiotics.append({'Smiles':'CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C','Name':'Penicilline G'}, ignore_index=True)#Penicilline G
>>> antibiotics = antibiotics.append({'Smiles':'CC1(C2CC3C(C(=O)C(=C(C3(C(=O)C2=C(C4=C1C=CC=C4O)O)O)O)C(=O)N)N(C)C)O','Name':'Tetracycline'}, ignore_index=True)#Tetracycline
>>> antibiotics = antibiotics.append({'Smiles':'CC1(C(N2C(S1)C(C2=O)NC(=O)C(C3=CC=CC=C3)N)C(=O)O)C','Name':'Ampicilline'}, ignore_index=True)#Ampicilline
>>> print([str(x) for x in antibiotics.columns])
['Name', 'Smiles']
>>> print(antibiotics)
Name Smiles
0 Penicilline G CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C
1 Tetracycline CC1(C2CC3C(C(=O)C(=C(C3(C(=O)C2=C(C4=C1C=CC=C4...
2 Ampicilline CC1(C(N2C(S1)C(C2=O)NC(=O)C(C3=CC=CC=C3)N)C(=O...
a new column can be created holding the respective RDKit molecule objects. The fingerprint can be included to accelerate substructure searches on the dataframe.
>>> PandasTools.AddMoleculeColumnToFrame(antibiotics,'Smiles','Molecule',includeFingerprints=True)
>>> print([str(x) for x in antibiotics.columns])
['Name', 'Smiles', 'Molecule']
A substructure filter can be applied on the dataframe using the RDKit molecule column, because the ">=" operator has been modified to work as a substructure check.
Such the antibiotics containing the beta-lactam ring "C1C(=O)NC1" can be obtained by
>>> beta_lactam = Chem.MolFromSmiles('C1C(=O)NC1')
>>> beta_lactam_antibiotics = antibiotics[antibiotics['Molecule'] >= beta_lactam]
>>> print(beta_lactam_antibiotics[['Name','Smiles']])
Name Smiles
0 Penicilline G CC1(C(N2C(S1)C(C2=O)NC(=O)CC3=CC=CC=C3)C(=O)O)C
2 Ampicilline CC1(C(N2C(S1)C(C2=O)NC(=O)C(C3=CC=CC=C3)N)C(=O...
It is also possible to load an SDF file can be load into a dataframe.
>>> sdfFile = os.path.join(RDConfig.RDDataDir,'NCI/first_200.props.sdf')
>>> frame = PandasTools.LoadSDF(sdfFile,smilesName='SMILES',molColName='Molecule',includeFingerprints=True)
>>> frame.info # doctest: +SKIP
<bound method DataFrame.info of <class 'pandas.core.frame.DataFrame'>
Int64Index: 200 entries, 0 to 199
Data columns:
AMW 200 non-null values
CLOGP 200 non-null values
CP 200 non-null values
CR 200 non-null values
DAYLIGHT.FPG 200 non-null values
DAYLIGHT_CLOGP 200 non-null values
FP 200 non-null values
ID 200 non-null values
ISM 200 non-null values
LIPINSKI_VIOLATIONS 200 non-null values
NUM_HACCEPTORS 200 non-null values
NUM_HDONORS 200 non-null values
NUM_HETEROATOMS 200 non-null values
NUM_LIPINSKIHACCEPTORS 200 non-null values
NUM_LIPINSKIHDONORS 200 non-null values
NUM_RINGS 200 non-null values
NUM_ROTATABLEBONDS 200 non-null values
P1 30 non-null values
SMILES 200 non-null values
Molecule 200 non-null values
dtypes: object(20)>
Conversion to html is quite easy:
>>> htm = frame.to_html()
>>> str(htm[:36])
'<table border="1" class="dataframe">'
In order to support rendering the molecules as images in the HTML export of the dataframe, the __str__ method is monkey-patched to return a base64 encoded PNG:
>>> molX = Chem.MolFromSmiles('Fc1cNc2ccccc12')
>>> print(molX) # doctest: +SKIP
<img src="data:image/png;base64,..." alt="Mol"/>
This can be reverted using the ChangeMoleculeRendering method
>>> ChangeMoleculeRendering(renderer='String')
>>> print(molX) # doctest: +SKIP
<rdkit.Chem.rdchem.Mol object at 0x10d179440>
>>> ChangeMoleculeRendering(renderer='PNG')
>>> print(molX) # doctest: +SKIP
<img src="data:image/png;base64,..." alt="Mol"/>
'''
from __future__ import print_function
from base64 import b64encode
import types, copy
from rdkit.six import BytesIO, string_types
from rdkit import Chem
from rdkit.Chem import Draw
try:
import pandas as pd
try:
v = pd.__version__.split('.')
except AttributeError:
# support for older versions of pandas
v = pd.version.version.split('.')
if v[0] == '0' and int(v[1]) < 10:
print("Pandas version %s not compatible with tests" % v, file=sys.stderr)
pd = None
else:
if 'display.width' in pd.core.config._registered_options:
pd.set_option('display.width', 1000000000)
if 'display.max_rows' in pd.core.config._registered_options:
pd.set_option('display.max_rows', 1000000000)
elif 'display.height' in pd.core.config._registered_options:
pd.set_option('display.height', 1000000000)
if 'display.max_colwidth' in pd.core.config._registered_options:
pd.set_option('display.max_colwidth', 1000000000)
#saves the default pandas rendering to allow restauration
defPandasRendering = pd.core.frame.DataFrame.to_html
except ImportError:
import traceback
traceback.print_exc()
pd = None
except Exception as e:
import sys
import traceback
traceback.print_exc()
pd = None
if pd:
try:
from pandas.formats import format as fmt
except ImportError:
from pandas.core import format as fmt # older versions
highlightSubstructures = True
molRepresentation = 'png' # supports also SVG
molSize = (200, 200)
def patchPandasHTMLrepr(self, **kwargs):
'''
Patches the default escaping of HTML control characters to allow molecule image rendering in dataframes
'''
formatter = fmt.DataFrameFormatter(
self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None,
force_unicode=None, bold_rows=True, classes=None, escape=False)
formatter.to_html()
html = formatter.buf.getvalue()
return html
def patchPandasHeadMethod(self, n=5):
'''Ensure inheritance of patched to_html in "head" subframe
'''
df = self[:n]
df.to_html = types.MethodType(patchPandasHTMLrepr, df)
df.head = types.MethodType(patchPandasHeadMethod, df)
return df
def _get_image(x):
"""displayhook function for PIL Images, rendered as PNG"""
import pandas as pd
bio = BytesIO()
x.save(bio, format='PNG')
s = b64encode(bio.getvalue()).decode('ascii')
pd.set_option('display.max_columns', len(s) + 1000)
pd.set_option('display.max_rows', len(s) + 1000)
if len(s) + 100 > pd.get_option("display.max_colwidth"):
pd.set_option("display.max_colwidth", len(s) + 1000)
return s
def _get_svg_image(mol, size=(200, 200), highlightAtoms=[]):
""" mol rendered as SVG """
from IPython.display import SVG
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
try:
# If no coordinates, calculate 2D
mol.GetConformer(-1)
except ValueError:
rdDepictor.Compute2DCoords(mol)
drawer = rdMolDraw2D.MolDraw2DSVG(*size)
drawer.DrawMolecule(mol, highlightAtoms=highlightAtoms)
drawer.FinishDrawing()
svg = drawer.GetDrawingText().replace('svg:', '')
return SVG(svg).data # IPython's SVG clears the svg text
from rdkit import DataStructs
try:
from rdkit.Avalon import pyAvalonTools as pyAvalonTools
_fingerprinter = lambda x, y: pyAvalonTools.GetAvalonFP(x, isQuery=y, bitFlags=pyAvalonTools.avalonSSSBits)
except ImportError:
_fingerprinter = lambda x, y: Chem.PatternFingerprint(x, fpSize=2048)
def _molge(x, y):
"""Allows for substructure check using the >= operator (X has substructure Y -> X >= Y) by
monkey-patching the __ge__ function
This has the effect that the pandas/numpy rowfilter can be used for substructure filtering (filtered = dframe[dframe['RDKitColumn'] >= SubstructureMolecule])
"""
if x is None or y is None:
return False
if hasattr(x, '_substructfp'):
if not hasattr(y, '_substructfp'):
y._substructfp = _fingerprinter(y, True)
if not DataStructs.AllProbeBitsMatch(y._substructfp, x._substructfp):
return False
match = x.GetSubstructMatch(y)
if match:
if highlightSubstructures:
x.__sssAtoms = list(match)
else:
x.__sssAtoms = []
return True
else:
return False
Chem.Mol.__ge__ = _molge # lambda x,y: x.HasSubstructMatch(y)
def PrintAsBase64PNGString(x, renderer=None):
'''returns the molecules as base64 encoded PNG image
'''
if highlightSubstructures and hasattr(x, '__sssAtoms'):
highlightAtoms = x.__sssAtoms
else:
highlightAtoms = []
if molRepresentation.lower() == 'svg':
return _get_svg_image(x, highlightAtoms=highlightAtoms, size=molSize)
else:
return '<img src="data:image/png;base64,%s" alt="Mol"/>' % _get_image(
Draw.MolToImage(x, highlightAtoms=highlightAtoms, size=molSize))
def PrintDefaultMolRep(x):
return str(x.__repr__())
#Chem.Mol.__str__ = lambda x: '<img src="data:image/png;base64,%s" alt="Mol"/>'%get_image(Draw.MolToImage(x))
Chem.Mol.__str__ = PrintAsBase64PNGString
def _MolPlusFingerprint(m):
'''Precomputes fingerprints and stores results in molecule objects to accelerate substructure matching
'''
#m = Chem.MolFromSmiles(smi)
if m is not None:
m._substructfp = _fingerprinter(m, False)
return m
def RenderImagesInAllDataFrames(images=True):
'''Changes the default dataframe rendering to not escape HTML characters, thus allowing rendered images in all dataframes.
IMPORTANT: THIS IS A GLOBAL CHANGE THAT WILL AFFECT THE COMPLETE PYTHON SESSION. If you want to change the rendering only
for a single dataframe use the "ChangeMoleculeRendering" method instead.
'''
if images:
pd.core.frame.DataFrame.to_html = patchPandasHTMLrepr
else:
pd.core.frame.DataFrame.to_html = defPandasRendering
def AddMoleculeColumnToFrame(frame, smilesCol='Smiles', molCol='ROMol', includeFingerprints=False):
'''Converts the molecules contained in "smilesCol" to RDKit molecules and appends them to the dataframe "frame" using the specified column name.
If desired, a fingerprint can be computed and stored with the molecule objects to accelerate substructure matching
'''
if not includeFingerprints:
frame[molCol] = frame[smilesCol].map(Chem.MolFromSmiles)
else:
frame[molCol] = frame[smilesCol].map(
lambda smiles: _MolPlusFingerprint(Chem.MolFromSmiles(smiles)))
RenderImagesInAllDataFrames(images=True)
#frame.to_html = types.MethodType(patchPandasHTMLrepr,frame)
#frame.head = types.MethodType(patchPandasHeadMethod,frame)
def ChangeMoleculeRendering(frame=None, renderer='PNG'):
'''Allows to change the rendering of the molecules between base64 PNG images and string representations.
This serves two purposes: first, it allows the generation of images to be avoided if this is not desired and, secondly, it allows image rendering to be enabled for a
newly created dataframe that already contains molecules, without having to rerun the time-consuming AddMoleculeColumnToFrame. Note: this is needed because some pandas methods, e.g. head(),
return a new dataframe instance that uses the default pandas rendering (thus not drawing images for molecules) instead of the monkey-patched one.
'''
if renderer == 'String':
Chem.Mol.__str__ = PrintDefaultMolRep
else:
Chem.Mol.__str__ = PrintAsBase64PNGString
if frame is not None:
frame.to_html = types.MethodType(patchPandasHTMLrepr, frame)
def LoadSDF(filename, idName='ID', molColName='ROMol', includeFingerprints=False,
isomericSmiles=False, smilesName=None, embedProps=False):
'''Read file in SDF format and return as Pandas data frame.
If embedProps=True all properties also get embedded in Mol objects in the molecule column.
If molColName=None molecules would not be present in resulting DataFrame (only properties would be read).
'''
df = None
if isinstance(filename, string_types):
if filename.lower()[-3:] == ".gz":
import gzip
f = gzip.open(filename, "rb")
else:
f = open(filename, 'rb')
close = f.close
else:
f = filename
close = None # don't close an open file that was passed in
records = []
indices = []
for i, mol in enumerate(Chem.ForwardSDMolSupplier(f, sanitize=(molColName is not None))):
if mol is None:
continue
row = dict((k, mol.GetProp(k)) for k in mol.GetPropNames())
if molColName is not None and not embedProps:
for prop in mol.GetPropNames():
mol.ClearProp(prop)
if mol.HasProp('_Name'):
row[idName] = mol.GetProp('_Name')
if smilesName is not None:
row[smilesName] = Chem.MolToSmiles(mol, isomericSmiles=isomericSmiles)
if molColName is not None and not includeFingerprints:
row[molColName] = mol
elif molColName is not None:
row[molColName] = _MolPlusFingerprint(mol)
records.append(row)
indices.append(i)
if close is not None:
close()
RenderImagesInAllDataFrames(images=True)
return pd.DataFrame(records, index=indices)
from rdkit.Chem import SDWriter
def WriteSDF(df, out, molColName='ROMol', idName=None, properties=None, allNumeric=False):
'''Write an SD file for the molecules in the dataframe. Dataframe columns can be exported as SDF tags if specified in the "properties" list. "properties=list(df.columns)" would export all columns.
The "allNumeric" flag allows to automatically include all numeric columns in the output. User has to make sure that correct data type is assigned to column.
"idName" can be used to select a column to serve as molecule title. It can be set to "RowID" to use the dataframe row key as title.
'''
close = None
if isinstance(out, string_types):
if out.lower()[-3:] == ".gz":
import gzip
out = gzip.open(out, "wb")
close = out.close
writer = SDWriter(out)
if properties is None:
properties = []
else:
properties = list(properties)
if allNumeric:
properties.extend(
[dt for dt in df.dtypes.keys()
if (np.issubdtype(df.dtypes[dt], float) or np.issubdtype(df.dtypes[dt], int))])
if molColName in properties:
properties.remove(molColName)
if idName in properties:
properties.remove(idName)
writer.SetProps(properties)
for row in df.iterrows():
# make a local copy I can modify
mol = Chem.Mol(row[1][molColName])
if idName is not None:
if idName == 'RowID':
mol.SetProp('_Name', str(row[0]))
else:
mol.SetProp('_Name', str(row[1][idName]))
for p in properties:
cell_value = row[1][p]
# Make sure float does not get formatted in E notation
if np.issubdtype(type(cell_value), float):
s = '{:f}'.format(cell_value).rstrip("0") # "f" will show 7.0 as 7.00000
if s[-1] == ".":
s += "0" # put the "0" back on if it's something like "7."
mol.SetProp(p, s)
else:
mol.SetProp(p, str(cell_value))
writer.write(mol)
writer.close()
if close is not None:
close()
_saltRemover = None
def RemoveSaltsFromFrame(frame, molCol='ROMol'):
'''
Removes salts from mols in pandas DataFrame's ROMol column
'''
global _saltRemover
if _saltRemover is None:
from rdkit.Chem import SaltRemover
_saltRemover = SaltRemover.SaltRemover()
frame[molCol] = frame.apply(lambda x: _saltRemover.StripMol(x[molCol]), axis=1)
def SaveSMILESFromFrame(frame, outFile, molCol='ROMol', NamesCol='', isomericSmiles=False):
'''
Saves smi file. SMILES are generated from column with RDKit molecules. Column with names is optional.
'''
w = Chem.SmilesWriter(outFile, isomericSmiles=isomericSmiles)
if NamesCol != '':
for m, n in zip(frame[molCol], map(str, frame[NamesCol])):
m.SetProp('_Name', n)
w.write(m)
w.close()
else:
for m in frame[molCol]:
w.write(m)
w.close()
import numpy as np
import os
from rdkit.six.moves import cStringIO as StringIO
def SaveXlsxFromFrame(frame, outFile, molCol='ROMol', size=(300, 300)):
"""
Saves pandas DataFrame as a xlsx file with embedded images.
It maps numpy data types to excel cell types:
int, float -> number
datetime -> datetime
object -> string (limited to 32k character - xlsx limitations)
Cells with compound images are a bit larger than images due to excel.
Column width weirdness explained (from xlsxwriter docs):
The width corresponds to the column width value that is specified in Excel.
It is approximately equal to the length of a string in the default font of Calibri 11.
Unfortunately, there is no way to specify "AutoFit" for a column in the Excel file format.
This feature is only available at runtime from within Excel.
"""
import xlsxwriter # don't want to make this a RDKit dependency
cols = list(frame.columns)
cols.remove(molCol)
dataTypes = dict(frame.dtypes)
workbook = xlsxwriter.Workbook(outFile) # New workbook
worksheet = workbook.add_worksheet() # New work sheet
worksheet.set_column('A:A', size[0] / 6.) # column width
# Write first row with column names
c2 = 1
for x in cols:
worksheet.write_string(0, c2, x)
c2 += 1
c = 1
for index, row in frame.iterrows():
image_data = StringIO()
img = Draw.MolToImage(row[molCol], size=size)
img.save(image_data, format='PNG')
worksheet.set_row(c, height=size[1]) # looks like height is not in px?
worksheet.insert_image(c, 0, "f", {'image_data': image_data})
c2 = 1
for x in cols:
if str(dataTypes[x]) == "object":
worksheet.write_string(c, c2, str(row[x])[:32000]) # string length is limited in xlsx
elif ('float' in str(dataTypes[x])) or ('int' in str(dataTypes[x])):
if (row[x] != np.nan) or (row[x] != np.inf):
worksheet.write_number(c, c2, row[x])
elif 'datetime' in str(dataTypes[x]):
worksheet.write_datetime(c, c2, row[x])
c2 += 1
c += 1
workbook.close()
image_data.close()
def FrameToGridImage(frame, column='ROMol', legendsCol=None, **kwargs):
'''
Draw grid image of mols in pandas DataFrame.
'''
if legendsCol:
if legendsCol == frame.index.name:
img = Draw.MolsToGridImage(frame[column], legends=list(map(str, list(frame.index))), **kwargs)
else:
img = Draw.MolsToGridImage(frame[column], legends=list(map(str, list(frame[legendsCol]))),
**kwargs)
else:
img = Draw.MolsToGridImage(frame[column], **kwargs)
return img
from rdkit.Chem.Scaffolds import MurckoScaffold
def AddMurckoToFrame(frame, molCol='ROMol', MurckoCol='Murcko_SMILES', Generic=False):
'''
Adds column with SMILES of Murcko scaffolds to pandas DataFrame. Generic set to true results in SMILES of generic framework.
'''
if Generic:
frame[MurckoCol] = frame.apply(lambda x: Chem.MolToSmiles(MurckoScaffold.MakeScaffoldGeneric(MurckoScaffold.GetScaffoldForMol(x[molCol]))), axis=1)
else:
frame[MurckoCol] = frame.apply(
lambda x: Chem.MolToSmiles(MurckoScaffold.GetScaffoldForMol(x[molCol])), axis=1)
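# A short usage sketch (names as defined in this module): after
# AddMoleculeColumnToFrame(frame, 'Smiles'), calling AddMurckoToFrame(frame)
# adds a 'Murcko_SMILES' column, which AlignToScaffold(frame), defined below,
# can use to align the depictions to their scaffolds.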
from rdkit.Chem import AllChem
def AlignMol(mol, scaffold):
"""
Aligns mol (RDKit mol object) to scaffold (SMILES string)
"""
scaffold = Chem.MolFromSmiles(scaffold)
AllChem.Compute2DCoords(scaffold)
AllChem.GenerateDepictionMatching2DStructure(mol, scaffold)
return mol
def AlignToScaffold(frame, molCol='ROMol', scaffoldCol='Murcko_SMILES'):
'''
Aligns molecules in molCol to scaffolds in scaffoldCol
'''
frame[molCol] = frame.apply(lambda x: AlignMol(x[molCol], x[scaffoldCol]), axis=1)
if __name__ == "__main__":
import sys
if pd is None:
print("pandas installation not found, skipping tests", file=sys.stderr)
else:
# version check
try:
v = pd.__version__.split('.')
except AttributeError:
# support for older versions of pandas
v = | pd.version.version.split('.') | pandas.version.version.split |
from incrementalSearch import incrementalSearch
from bisection import bisection
from newton import newton
from falseRule import falseRule
from fixedPoint import fixedPoint
from multipleRoots import multipleRoots
from gaussPartialPivot import partialPivot
from gaussSimple import gaussSimple
from gaussTotal import gaussTotal
from secant import secant
from template import template
from LUpivot import luPivot
from LUsimple import luSimple
from crout import crout
from gaussSeidel import gaussSeidel
from jacobi import jacobi
from sor import sor
import inspect
import pandas as pd
import ast
import numpy as np
def incOpt():
args = inspect.getfullargspec(incrementalSearch)[0]
return incrementalSearch(*defineParams(args))
def bicOpt():
args = inspect.getfullargspec(bisection)[0]
return bisection(*defineParams(args))
def newtonOpt():
args = inspect.getfullargspec(newton)[0]
return newton(*defineParams(args))
def falseOpt():
args = inspect.getfullargspec(falseRule)[0]
return falseRule(*defineParams(args))
def fixedOpt():
args = inspect.getfullargspec(fixedPoint)[0]
return fixedPoint(*defineParams(args))
def secantOpt():
args = inspect.getfullargspec(secant)[0]
return secant(*defineParams(args))
def multipleOpt():
args = inspect.getfullargspec(multipleRoots)[0]
return multipleRoots(*defineParams(args))
def simpleOpt():
args = inspect.getfullargspec(gaussSimple)[0]
return gaussSimple(*defineParams(args))
def partialOpt():
args = inspect.getfullargspec(partialPivot)[0]
return partialPivot(*defineParams(args))
def gaussTotalOpt():
args = inspect.getfullargspec(gaussTotal)[0]
return gaussTotal(*defineParams(args))
def luPivotOpt():
args = inspect.getfullargspec(luPivot)[0]
return luPivot(*defineParams(args))
def croutOpt():
args = inspect.getfullargspec(crout)[0]
return crout(*defineParams(args))
def luSimpleOpt():
args = inspect.getfullargspec(luSimple)[0]
return luSimple(*defineParams(args))
def templateOpt():
args = inspect.getfullargspec(template)[0]
return template(*defineParams(args))
def jacobiOpt():
args = inspect.getfullargspec(jacobi)[0]
return jacobi(*defineParams(args))
def gaussSeidelOpt():
args = inspect.getfullargspec(gaussSeidel)[0]
return gaussSeidel(*defineParams(args))
def sorOpt():
args = inspect.getfullargspec(sor)[0]
return sor(*defineParams(args))
def main():
print(
'0 for template\n'
'1 for incremental search\n'
'2 for bisection\n'
'3 for false rule\n'
'4 for newton\n'
'5 for fixed point\n'
'6 for secant\n'
'7 for multiple roots\n'
'8 for gauss simple\n'
'9 for gauss partial pivot\n'
'10 for gauss total pivot\n'
'11 for lu simple \n'
'12 for lu pivot \n'
'13 for lu crout \n'
'14 for jacobi \n'
'15 for gauss-seidel\n'
'16 for sor\n'
)
option = int(input())
switch = {
0: templateOpt,
1: incOpt,
2: bicOpt,
3: falseOpt,
4: newtonOpt,
5: fixedOpt,
6: secantOpt,
7: multipleOpt,
8: simpleOpt,
9: partialOpt,
10: gaussTotalOpt,
11: luSimpleOpt,
12: luPivotOpt,
13: croutOpt,
14: jacobiOpt,
15: gaussSeidelOpt,
16: sorOpt
}
func = switch.get(option, lambda: [{ 'status' : "Invalid option!!"}])
if(option <= 7):
showTable(func())
elif (option <= 12):
#[[4, -1, 0 ,3], [1 ,15.5 ,3 ,8], [0 ,-1.3 ,-4 ,1.1], [14,5,-2,30]]
#[-32, 31 , 13 , -12]
#[[-7,2,-3,4],[5,-1,14,-1],[1,9,-7,5],[-28,13,-8,-4]] [-12,13,31,-32]
#[[1,2,2,4,8],[0,1,1,5,9],[0,0,0,7,10],[0,0,1,8,11]]
showSteps(func())
else:
"""
A = [[7, -2, -2, -1], [-2, 8, -2, -2], [-2, -2, 6, 2],[-1, -2, -2, 10]]
b = [1, 1, 1, 1]
c = [0, 0, 0, 0]
norm = 2
tol = 10e-7
iters = 100
w = 1.5
"""
showObject(func())
return 0
def defineParams(params):
values = []
for param in params:
print('Enter %s ' % (param))
value = input()
try:
values.append(float(value))
except:
values.append(ast.literal_eval(value))
print('')
return values
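# Example of how defineParams interprets console input (illustrative):
# a plain number such as "0.5" is parsed as the float 0.5, while anything that
# fails float() (e.g. "[[4,-1,0],[1,15.5,3]]" or "'x**2 - 3'") is handed to
# ast.literal_eval, so matrices, vectors and quoted function strings can be
# typed directly as Python literals.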
def showSteps(steps):
for step in steps:
try:
print(pd.DataFrame(step).to_string(index=False, header=False)+"\n")
except:
print(step)
def showObject(res):
print(res['TMatrix'])
print(res['SpectRad'])
print(res['CMatrix'])
df = pd.DataFrame(res['Iters'])
size = res['Iters']
size = len(size[0]['x'])
cols = np.arange(size)
x = pd.DataFrame(df['x'].values.tolist(), columns=cols)
df = df.drop('x',axis = 1).drop('iter', axis=1)
df = pd.concat([ df.reset_index(drop=True), x.reset_index(drop= True) ], axis=1)
print(df)
def showTable(table):
result = ""
if('status' in table[-1]):
result = table[-1]
table.pop()
print(pd.DataFrame(table))
# Copyright 2020 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Created by <NAME> and <NAME>
"""
Functional Requirements: 8, 9, 10
Non-Functional Requirements: 3
"""
import sys
from pattern.en import parse, pprint
from autocorrect import Speller
from pandas import DataFrame
import command_output_demo
# Function to tag specific verbs and action words and arrange them in a dataframe
def get_action(command_df):
dfAction = command_df.query("TAG=='VB' or TAG=='IN' or CHUNK=='I-VP'")[['WORD']]
action = []
for label, content in dfAction.WORD.items():
action.append(content)
action = ' '.join(action)
return action
# Function to tag specific nouns and descriptive nouns and arrange them in a dataframe
def get_objects(command_df):
dfobject = command_df.query("TAG=='NN' or TAG=='NNS' or CHUNK=='I-NP'")
objects = []
for label, content in dfobject.WORD.items():
objects.append(content)
objects = ' '.join(objects)
return objects
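# Illustrative sketch only (the exact tags depend on the pattern.en tagger):
# for a command like "open the text file", parse() tags "open" as a verb (VB)
# and "text"/"file" as nouns inside a noun phrase (NN / I-NP), so
# get_action(df) returns roughly "open" and get_objects(df) roughly "text file".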
def parse_command(strInput):
lsParsed = parse(strInput, relations=True, lemmata=True).split()
if len(lsParsed) < 1:
return strInput
df = DataFrame(lsParsed[0], columns=["WORD", "TAG", "CHUNK", "ROLE", "PNP"])
from sqlalchemy import func
import pandas as pd
import numpy as np
from cswd.sql.base import session_scope
from cswd.sql.models import (Issue, StockDaily, Adjustment, DealDetail,
SpecialTreatment, SpecialTreatmentType)
DAILY_COLS = ['symbol', 'date',
'open', 'high', 'low', 'close',
'prev_close', 'change_pct',
'volume', 'amount', 'turnover', 'cmv', 'tmv']
OHLCV_COLS = ['open', 'high', 'low', 'close', 'volume']
MINUTELY_COLS = ['symbol', 'date'] + OHLCV_COLS
ADJUSTMENT_COLS = ['symbol', 'date', 'amount', 'ratio',
'record_date', 'pay_date', 'listing_date']
def get_exchange(code):
if code[0] in ('0', '3'):
return "SZSE"
else:
return "SSE"
def get_start_dates():
"""
Stock listing dates
Examples
--------
>>> df = get_start_dates()
>>> df.head()
symbol start_date
0 000001 1991-04-03
1 000002 1991-01-29
2 000003 1991-01-14
3 000004 1991-01-14
4 000005 1990-12-10
"""
col_names = ['symbol', 'start_date']
with session_scope() as sess:
query = sess.query(
Issue.code,
Issue.A004_上市日期
).filter(
Issue.A004_上市日期.isnot(None)
)
df = pd.DataFrame.from_records(query.all())
df.columns = col_names
return df
def get_end_dates():
"""
Stock end dates, limited to stocks that have been delisted or are currently suspended from listing
Examples
--------
>>> df = get_end_dates()
>>> df.head()
symbol end_date
0 000003 2002-06-14
1 000013 2004-09-20
2 000015 2001-10-22
3 000024 2015-12-30
4 000033 2017-07-07
"""
col_names = ['symbol', 'end_date']
with session_scope() as sess:
query = sess.query(
SpecialTreatment.code,
func.max(SpecialTreatment.date)
).group_by(
SpecialTreatment.code
).having(
SpecialTreatment.treatment.in_(
[SpecialTreatmentType.delisting, SpecialTreatmentType.PT]
)
)
df = pd.DataFrame.from_records(query.all())
df.columns = col_names
return df
def get_latest_short_name():
"""
Get the latest short name of each stock
Examples
--------
>>> df = get_end_dates()
>>> df.head()
symbol asset_name
0 000001 平安银行
1 000002 万 科A
2 000003 PT金田A
3 000004 国农科技
4 000005 世纪星源
"""
col_names = ['symbol', 'asset_name']
with session_scope() as sess:
query = sess.query(
StockDaily.code,
StockDaily.A001_名称
).group_by(
StockDaily.code
).having(
func.max(StockDaily.date)
)
df = pd.DataFrame.from_records(query.all())
df.columns = col_names
return df
def gen_asset_metadata(only_in=True):
"""
Generate stock asset metadata
Parameters
----------
only_in : bool
Whether to include only currently listed stocks, defaults to True.
Examples
--------
>>> df = gen_asset_metadata()
>>> df.head()
symbol asset_name first_traded last_traded exchange auto_close_date \
0 000001 平安银行 1991-01-02 2018-04-19 SZSE 2018-04-20
1 000002 万 科A 1991-01-02 2018-04-19 SZSE 2018-04-20
2 000004 国农科技 1991-01-02 2018-04-19 SZSE 2018-04-20
3 000005 世纪星源 1991-01-02 2018-04-19 SZSE 2018-04-20
4 000006 深振业A 1992-04-27 2018-04-19 SZSE 2018-04-20
start_date end_date
0 1991-04-03 2018-04-19
1 1991-01-29 2018-04-19
2 1991-01-14 2018-04-19
3 1990-12-10 2018-04-19
4 1992-04-27 2018-04-19
"""
columns = ['symbol', 'first_traded', 'last_traded']
with session_scope() as sess:
query = sess.query(
StockDaily.code,
func.min(StockDaily.date),
func.max(StockDaily.date)
).filter(
~StockDaily.code.startswith('2')
).filter(
~StockDaily.code.startswith('9')
).group_by(
StockDaily.code
)
df = pd.DataFrame.from_records(query.all())
df.columns = columns
df['exchange'] = df['symbol'].map(get_exchange)
df['auto_close_date'] = df['last_traded'].map(
lambda x: x + pd.Timedelta(days=1))
latest_name = get_latest_short_name()
start_dates = get_start_dates()
end_dates = get_end_dates()
df = df.merge(
latest_name, 'left', on='symbol'
).merge(
start_dates, 'left', on='symbol'
).merge(
end_dates, 'left', on='symbol'
)
# For stocks that have not been delisted, use the last traded date as the end date
df.loc[df.end_date.isna(), 'end_date'] = df.loc[df.end_date.isna(),
'last_traded']
if only_in:
df = df[~df.symbol.isin(end_dates.symbol)]
df.reset_index(inplace=True, drop=True)
return df
def _fill_zero(df):
"""填充因为停牌ohlc可能存在的0值"""
# 将close放在第一列
ohlc_cols = ['close', 'open', 'high', 'low']
ohlc = df[ohlc_cols].copy()
ohlc.replace(0.0, np.nan, inplace=True)
ohlc.close.fillna(method='ffill', inplace=True)
# Forward-fill across columns (propagate close into open/high/low)
ohlc.fillna(method='ffill', axis=1, inplace=True)
for col in ohlc_cols:
df[col] = ohlc[col]
return df
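# Sketch of the behaviour on a suspended trading day (illustrative values):
# >>> d = pd.DataFrame({'close': [10.0, 0.0], 'open': [9.8, 0.0],
# ...                   'high': [10.1, 0.0], 'low': [9.7, 0.0]})
# >>> _fill_zero(d).iloc[1].tolist()
# [10.0, 10.0, 10.0, 10.0]
# The zero row first gets its close forward-filled from the previous day, and
# the remaining columns are then filled row-wise from that close.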
def fetch_single_equity(stock_code, start, end):
"""
Read daily trading data for a single stock over a date range from the local database
Notes
-----
1. Besides OHLCV, the result also includes change percentage, turnover amount, turnover rate, circulating market value, total market value, circulating shares and total shares
2. When writing in bcolz format, the change-percentage column must be dropped because it contains negative values!!!
Parameters
----------
stock_code : str
Stock code to fetch data for
start_date : datetime-like
Start date (inclusive)
end_date : datetime-like
End date
return
----------
DataFrame: DataFrame object with OHLCV (and related) columns.
Examples
--------
>>> symbol = '000333'
>>> start_date = '2017-4-1'
>>> end_date = pd.Timestamp('2018-4-16')
>>> df = fetch_single_equity(symbol, start_date, end_date)
>>> df.iloc[:,:8]
symbol date open high low close prev_close change_pct
0 000333 2018-04-02 53.30 55.00 52.68 52.84 54.53 -3.0992
1 000333 2018-04-03 52.69 53.63 52.18 52.52 52.84 -0.6056
2 000333 2018-04-04 52.82 54.10 52.06 53.01 52.52 0.9330
3 000333 2018-04-09 52.91 53.31 51.00 51.30 53.01 -3.2258
4 000333 2018-04-10 51.45 52.80 51.18 52.77 51.30 2.8655
5 000333 2018-04-11 52.78 53.63 52.41 52.98 52.77 0.3980
6 000333 2018-04-12 52.91 52.94 51.84 51.87 52.98 -2.0951
7 000333 2018-04-13 52.40 52.47 51.01 51.32 51.87 -1.0603
8 000333 2018-04-16 51.31 51.80 49.15 49.79 51.32 -2.9813
"""
start = pd.Timestamp(start).date()
end = pd.Timestamp(end).date()
with session_scope() as sess:
query = sess.query(
StockDaily.code,
StockDaily.date,
StockDaily.A002_开盘价,
StockDaily.A003_最高价,
StockDaily.A004_最低价,
StockDaily.A005_收盘价,
StockDaily.A009_前收盘,
StockDaily.A011_涨跌幅,
StockDaily.A006_成交量,
StockDaily.A007_成交金额,
StockDaily.A008_换手率,
StockDaily.A013_流通市值,
StockDaily.A012_总市值
).filter(
StockDaily.code == stock_code,
StockDaily.date.between(start, end)
)
df = pd.DataFrame.from_records(query.all())
df.columns = DAILY_COLS
df = _fill_zero(df)
df['circulating_share'] = df.cmv / df.close
df['total_share'] = df.tmv / df.close
return df
def _handle_minutely_data(df, exclude_lunch):
"""
Process a single stock's minute-level data for one date
"""
dts = pd.to_datetime(df[1].map(str) + ' ' + df[2])
ohlcv = pd.Series(data=df[3].values, index=dts).resample('T').ohlc()
ohlcv.fillna(method='ffill', inplace=True)
# Raw volume data is quoted in lots; convert to shares
volumes = pd.Series(data=df[4].values, index=dts).resample('T').sum() * 100
ohlcv.insert(4, 'volume', volumes)
if exclude_lunch:
# between_time includes both bounds by default
# Start from minute 31 (9:31) to stay consistent with the trading calendar
pre = ohlcv.between_time('9:25', '9:31')
def key(x): return x.date()
grouped = pre.groupby(key)
opens = grouped['open'].first()
highs = grouped['high'].max()
lows = grouped['low'].min()  # should possible zero values be handled here?
closes = grouped['close'].last()
volumes = grouped['volume'].sum()
index = pd.to_datetime([str(x) + ' 9:31' for x in opens.index])
add = pd.DataFrame({'open': opens.values,
'high': highs.values,
'low': lows.values,
'close': closes.values,
'volume': volumes.values
},
index=index)
am = ohlcv.between_time('9:32', '11:30')
pm = ohlcv.between_time('13:00', '15:00')
return pd.concat([add, am, pm])
else:
return ohlcv
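# Note on the aggregation above (descriptive sketch): resample('T').ohlc()
# buckets the tick prices into 1-minute bars (first/max/min/last) and
# resample('T').sum() does the same for volume; the pre-open ticks between
# 9:25 and 9:31 are then collapsed into a single synthetic 9:31 bar so the
# index lines up with the exchange trading calendar.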
def fetch_single_minutely_equity(stock_code, start, end, exclude_lunch=True):
"""
Read minute-level trade detail data for a single stock over a date range from the local database
Notes
-----
1. Only OHLCV columns are included
2. Raw data is aggregated by minute: first(open), last(close), max(high), min(low), sum(volume)
Parameters
----------
stock_code : str
Stock code to fetch data for
start_date : datetime-like
Start date (inclusive)
end_date : datetime-like
End date
exclude_lunch : bool
Whether to exclude the lunch break, defaults to True
return
----------
DataFrame: DataFrame object with OHLCV columns.
Examples
--------
>>> symbol = '000333'
>>> start_date = '2018-4-1'
>>> end_date = pd.Timestamp('2018-4-19')
>>> df = fetch_single_minutely_equity(symbol, start_date, end_date)
>>> df.tail()
close high low open volume
2018-04-19 14:56:00 51.55 51.56 51.50 51.55 376400
2018-04-19 14:57:00 51.55 51.55 51.55 51.55 20000
2018-04-19 14:58:00 51.55 51.55 51.55 51.55 0
2018-04-19 14:59:00 51.55 51.55 51.55 51.55 0
2018-04-19 15:00:00 51.57 51.57 51.57 51.57 353900
"""
start = pd.Timestamp(start).date()
end = pd.Timestamp(end).date()
with session_scope() as sess:
query = sess.query(
DealDetail.code,
DealDetail.date,
DealDetail.A001_时间,
DealDetail.A002_价格,
DealDetail.A004_成交量
).filter(
DealDetail.code == stock_code,
DealDetail.date.between(start, end)
)
df = pd.DataFrame.from_records(query.all())
if df.empty:
return pd.DataFrame(columns=OHLCV_COLS)
return _handle_minutely_data(df, exclude_lunch)
def fetch_single_quity_adjustments(stock_code, start, end):
"""
Read dividend and bonus-share (adjustment) data for a stock over a date range from the local database
Parameters
----------
stock_code : str
Stock code to fetch data for
start : datetime-like
Start date (inclusive)
end : datetime-like
End date
return
----------
DataFrame object
Examples
--------
>>> fetch_single_quity_adjustments('600000', '2010-4-1', '2018-4-16')
symbol date amount ratio record_date pay_date listing_date
0 600000 2010-06-10 0.150 0.3 2010-06-09 2010-06-11 2010-06-10
1 600000 2011-06-03 0.160 0.3 2011-06-02 2011-06-07 2011-06-03
2 600000 2012-06-26 0.300 0.0 2012-06-25 2012-06-26 2012-06-26
3 600000 2013-06-03 0.550 0.0 2013-05-31 2013-06-03 2013-06-03
4 600000 2014-06-24 0.660 0.0 2014-06-23 2014-06-24 2014-06-24
5 600000 2015-06-23 0.757 0.0 2015-06-19 2015-06-23 2015-06-23
6 600000 2016-06-23 0.515 0.1 2016-06-22 2016-06-24 2016-06-23
7 600000 2017-05-25 0.200 0.3 2017-05-24 2017-05-26 2017-05-25
"""
start = pd.Timestamp(start).date()
end = pd.Timestamp(end).date()
with session_scope() as sess:
query = sess.query(Adjustment.code,
Adjustment.date,
Adjustment.A002_派息,
Adjustment.A003_送股,
Adjustment.A004_股权登记日,
Adjustment.A005_除权基准日,
Adjustment.A006_红股上市日)
query = query.filter(Adjustment.code == stock_code)
query = query.filter(Adjustment.date.between(start, end))
df = pd.DataFrame.from_records(query.all())
if df.empty:
# Return an empty table
return pd.DataFrame(columns=ADJUSTMENT_COLS)
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception put both major and major_axis
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
import numpy as np
from fconcrete import config as c
import matplotlib.pyplot as plt
import time
import ezdxf
import pandas as pd
_Q = c._Q
def cond(x, singular=False, order=0):
"""
If It is singular, return 1 if x>0 else 0.
If It is not singular, return x**order if x>0 else 0
"""
if singular:
return 1 if x>0 else 0
return x**order if x>0 else 0
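# Quick examples (illustrative):
# >>> cond(2, order=2)
# 4
# >>> cond(-1, order=2)
# 0
# >>> cond(3, singular=True)
# 1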
def integrate(f, a, b, N=100):
"""
Integrate f from a to b in N steps
"""
x = np.linspace(a, b, N)
y = np.apply_along_axis(f, 0, np.array([x]))
return np.trapz(y, dx=(b-a)/(N-1))
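# Example (illustrative): integrate(np.sin, 0, np.pi) applies the trapezoidal
# rule on 100 sample points and yields a value very close to 2. Because f is
# routed through np.apply_along_axis it receives length-1 arrays, so the result
# may come back wrapped in a 1-element array depending on how f vectorizes.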
def duplicated(array):
"""
Check if it is duplicated.
"""
s = np.sort(array, axis=None)
duplicated = s[:-1][s[1:] == s[:-1]]
return np.isin(s, duplicated)
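# Example: the returned mask is aligned with the *sorted* values, not with the
# original input order.
# >>> duplicated([3, 1, 2, 2])
# array([False,  True,  True, False])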
def to_unit(input, expected_unit, return_unit=False):
"""
Convert between unities according to expected_unit and return_unit.
Call signatures:
fc.helpers.to_unit(input, expected_unit, return_unit=False)
>>> unit1 = fc.helpers.to_unit("10cm", "m")
>>> unit1
0.1
>>> unit2 = fc.helpers.to_unit(20, "m", return_unit="cm")
>>> unit2
2000.0
Parameters
----------
input : number or str
Represents the input unit of the user.
expected_unit : str
The expected unit to be given. Useful when input is a number.
return_unit : `bool`, optional
The desired unit to return
"""
try:
input = float(input)
value = _Q(input, expected_unit)
except:
pass
try:
value = _Q(input).to(expected_unit)
except: raise Exception("String does not have valid format. See documentation.")
if return_unit:
return value.to(return_unit).magnitude
return value.magnitude
def getAxis(xy0=(0,0), xy1=(0,0)):
"""
Create axis with equal aspect. xy0 and xy1 represent the visible area.
"""
x0, y0 = xy0
x1, y1 = xy1
fig, ax = plt.subplots()
ax.set_aspect("equal")
ax.plot([x0, x1], [y0, y1], color="None")
return fig, ax
def timeit(do=True, name=""):
"""
Decorator to print the time that the function has taken to execute.
"""
def inner0(function):
if not do: return function
def inner(*args, **kw):
start = time.time()
val = function(*args, **kw)
end = time.time()
print("{} executed in {}s".format(function.__name__ if name == "" else name, end-start))
return val
return inner
return inner0
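# Usage sketch (illustrative; the decorated function name is hypothetical):
# @timeit(name="section analysis")
# def analyze():
#     ...
# Calling analyze() prints something like "section analysis executed in 0.12s"
# and returns the wrapped function's value unchanged.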
# https://gist.github.com/snakers4/91fa21b9dda9d055a02ecd23f24fbc3d
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
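# Hypothetical usage sketch (loop bounds are arbitrary):
#
#     total = 57
#     for i in range(1, total + 1):
#         printProgressBar(i, total, prefix='Progress:', suffix='Complete', length=40)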
def make_dxf(ax, **options):
"""
Matplotlib graph to modelspace (preparation to dxf).
Returns ax and msp.
"""
msp = options["msp"] if options.get("msp") else False
scale_y = options["scale_y"] if options.get("scale_y") else 1
scale_x = options["scale_x"] if options.get("scale_x") else 1
xy_position = options["xy_position"] if options.get("xy_position") else (0,0)
if msp == False:
doc = ezdxf.new('AC1032')
doc.header['$INSUNITS'] = 5
msp = doc.modelspace()
for element in ax.get_children():
element_type = str(type(element))
if element_type == "<class 'matplotlib.lines.Line2D'>":
xy_data = element.get_xydata()
xy_data[:, 1] = xy_data[:, 1]*scale_y
xy_data[:, 0] = xy_data[:, 0]*scale_x
points = xy_data[np.invert(np.isnan(xy_data[:, 1]))]+xy_position
msp.add_lwpolyline(points)
elif element_type == "<class 'matplotlib.patches.Rectangle'>":
#p1, p2 = element.get_bbox().get_points()
points = element.get_patch_transform().transform(element.get_path().vertices[:-1]) #np.array([p1, [p1[0], p2[1]], p2, [p2[0], p1[1]], p1])
points = np.array([*points, points[0]])+xy_position
msp.add_lwpolyline(points)
if element.get_hatch():
hatch = msp.add_hatch()
hatch.set_pattern_fill('ANSI31', scale=0.5, angle=element.angle)
hatch.paths.add_polyline_path(points, is_closed=1)
elif element_type == "<class 'matplotlib.patches.Circle'>":
msp.add_circle(np.array(element.center)+xy_position, element.radius)
return ax, msp
def to_pandas(array_table):
df_table = | pd.DataFrame(array_table) | pandas.DataFrame |
""" Prep a series of graphs for the algorithm diagram figure.
"""
from collections import defaultdict, Counter
from copy import deepcopy
from itertools import combinations
from math import sqrt, factorial # , comb
import numpy
import pandas
def binom_coeff(n, k):
"""
apparently `from math import comb` isn't working in reticulate right now,
so we gotta do it this way
"""
return 0 if k > n else int(factorial(n) / (factorial(k) * factorial(n - k)))
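# Quick sanity checks (not part of the original module):
#     binom_coeff(4, 2) == 6
#     binom_coeff(2, 3) == 0   # k > n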
def create_optifit():
ref_otus = opticlust_example()
query_seqs = ["W", "X", "Y", "Z"]
query_dist_mat = dist_pairs_to_sets(
{
"seq1": ["X", "X", "X", "X", "Y", "W", "W", "W"],
"seq2": ["Y", "C", "G", "N", "C", "M", "N", "F"],
}
)
# original opticlust supplement example had 50 sequences, with 15 pairs
# being within the distance threshold.
# we now want to fit 4 query sequences to those reference OTUs.
# thus n_seqs = 50 + 4 = 54.
optifit = OptiFit(ref_otus, query_seqs, query_dist_mat, n_seqs=54)
return optifit
def write_iters(optifit_iters, outdir="figures/algorithm_steps"):
for i, iter_dir in enumerate(optifit_iters):
iter_dir["nodes"].to_csv(f"{outdir}/{i}_nodes")
iter_dir["edges"].to_csv(f"{outdir}/{i}_edges")
def opticlust_example():
dist_frame = {
"seq1": [
"D",
"F",
"G",
"H",
"I",
"I",
"J",
"J",
"N",
"O",
"P",
"P",
"P",
"Q",
"Q",
],
"seq2": [
"B",
"E",
"C",
"A",
"B",
"D",
"A",
"H",
"M",
"L",
"K",
"L",
"O",
"E",
"F",
],
}
assert len(dist_frame["seq1"]) == len(dist_frame["seq2"])
dist_mat = dist_pairs_to_sets(dist_frame)
otu_list = [
{},
{"I", "D", "B"},
{"F", "E", "Q"},
{"C", "G"},
{"H", "J", "A"},
{"M", "N"},
{"P", "L", "O"},
{"K"},
]
otus = otuMap.from_list(otu_list, dist_mat=dist_mat, n_seqs=50)
# print(otus)
# conf_mat = otus.conf_mat(dist_mat)
# print('mcc current:', mcc(conf_mat),
# '\nmcc from correct conf mat:',
# mcc({"tp": 14, "tn": 1210, "fp": 0, "fn": 1}),
# '\nmcc correct: 0.97')
return otus
def get_dists():
return pandas.DataFrame.from_dict(
{
"seq1": [
"D",
"F",
"G",
"H",
"I",
"I",
"J",
"J",
"N",
"O",
"P",
"P",
"P",
"Q",
"Q",
"X",
"X",
"X",
"X",
"Y",
"W",
"W",
"W",
],
"seq2": [
"B",
"E",
"C",
"A",
"B",
"D",
"A",
"H",
"M",
"L",
"K",
"L",
"O",
"E",
"F",
"Y",
"C",
"G",
"N",
"C",
"M",
"N",
"F",
],
}
)
def mcc(conf_mat):
"""
Calculate the Matthews Correlation coefficient given a confusion matrix.
"""
tp = conf_mat["tp"]
tn = conf_mat["tn"]
fp = conf_mat["fp"]
fn = conf_mat["fn"]
return round(
(tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)), 2
)
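# Worked check (not part of the original module): with the confusion matrix quoted
# in opticlust_example() above, mcc({"tp": 14, "tn": 1210, "fp": 0, "fn": 1})
# = round(14*1210 / sqrt(14 * 15 * 1210 * 1211), 2) = 0.97, matching the
# "mcc correct: 0.97" comment there.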
def classify(true_otu, pred_otu):
"""
Classify a prediction as a true positive (tp), true negative (tn),
false positive (fp), or false negataive (fn).
"""
if true_otu and pred_otu:
result = "tp"
elif true_otu and not pred_otu:
result = "fn"
elif not true_otu and pred_otu:
result = "fp"
elif not true_otu and not pred_otu:
result = "tn"
else:
raise ValueError("this should never ever happen")
return result
def dist_pairs_to_sets(dframe):
"""
Convert a dict of 2 lists, with each sequence at the same index being within
the distance threshold, to a dict of sets.
:param dframe: {'seq1': ['A', 'A', 'B'],
'seq2': ['B', 'C', 'D']}
:return: {'A': {'B','C'}, 'B': {'A','C','D'}, 'C': {'A','B'}, 'D': {'B'}}
"""
dist_set = defaultdict(set)
for seq1, seq2 in zip(dframe["seq1"], dframe["seq2"]):
dist_set[seq1].add(seq2)
dist_set[seq2].add(seq1)
return dist_set
def dist_array_to_dframe(dist_array):
"""
:param dist_array: pd.DataFrame(dist_array,
columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
:return: pd.DataFrame.from_dict({'seq1': ['A', 'A', 'B'],
'seq2': ['B', 'C', 'D']})
"""
return # TODO
def otu_list_to_dict(otu_list):
"""
:param otu_list: should be of the form
[{seqA, seqB}, {seqC, secD}, {secE}]
where the index in the list is the OTU ID.
:return: an otuMap
"""
return {seq: idx for idx, otu in enumerate(otu_list) for seq in otu}
def format_seq(
s,
curr_seq,
query_seqs,
color_curr_seq=False,
do_color=True,
base_color="#000000",
ref_color="#D95F02",
query_color="#1B9E77",
):
"""
format sequences with bold for the current iteration seq and
color-code reference and query sequences.
"""
if s == curr_seq:
s = f"**{s}**"
color = query_color if color_curr_seq else base_color
else:
color = query_color if s in query_seqs else ref_color
return f"<span style = 'color:{color};'>{s}</span>" if do_color else s
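# Illustrative outputs (not part of the original module), assuming query_seqs = {"X", "Y"}:
#     format_seq("X", "X", {"X", "Y"}, color_curr_seq=True)
#         -> "<span style = 'color:#1B9E77;'>**X**</span>"
#     format_seq("C", "X", {"X", "Y"})
#         -> "<span style = 'color:#D95F02;'>C</span>"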
class otuMap:
"""
Maps sequences to OTU assignments.
"""
def __init__(self, seqs_to_otus=None, dist_mat=None, n_seqs=0):
"""
:param seqs_to_otus: dict of the form {seqID: otuIndex},
e.g. {'seqA': 1, 'seqB': 2, 'seqC': 3}.
:param n_seqs: total number of sequences in dataset.
"""
self.seqs_to_otus = seqs_to_otus if seqs_to_otus else dict()
self.dist_mat = dist_mat if dist_mat else dict()
self.n_seqs = n_seqs
assert n_seqs >= len(self.seqs_to_otus)
def __repr__(self):
return "%s(%r)" % (self.__class__, self.seqs_to_otus)
def renumber_otus(self):
"""
re-number OTU ids so they're continuous
"""
old_otus = self.otus_to_seqs
if max(old_otus) != len(old_otus): # then OTU IDs are not continuous
self.seqs_to_otus = otu_list_to_dict(
[{}] + [otu for idx, otu in old_otus.items() if otu]
)
@classmethod
def from_list(cls, otu_list, **kwargs):
"""
:param otu_list: should be of the form
[{seqA, seqB}, {seqC, secD}, {secE}]
where the index in the list is the OTU ID.
:return: an otuMap
"""
return cls(seqs_to_otus=otu_list_to_dict(otu_list), **kwargs)
@property
def seqs(self):
return set(self.seqs_to_otus.keys())
@property
def otus_to_seqs(self):
"""
:return: dictionary of sets, with keys as OTU IDs and values as sets
containing sequence IDs.
"""
otu_dict_set = defaultdict(set)
for seq, otu_id in self.seqs_to_otus.items():
otu_dict_set[otu_id].add(seq)
return otu_dict_set
@property
def ghost_pairs(self):
"""
ghost_pairs: number of pairs from ghost sequences, calculated from the
distance matrix and total number of seqs (n_seqs).
Ghost sequences are not similar enough to any other seq
to be included in the distance matrix, thus they form singleton OTUs
and contribute to the number of true negatives.
These are not shown in the otuMap in order to save space.
"""
n_sim_seqs = len(self.dist_mat)
n_unsim_seqs = self.n_seqs - n_sim_seqs
# number of distances within the distance threshold, i.e. they're included in dist_mat
# n_dists = sum([len(dist_mat[s1]) for s1 in dist_mat]) / 2
return binom_coeff(n_unsim_seqs, 2) + n_unsim_seqs * n_sim_seqs
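    # Worked example (comments only, based on the opticlust_example() data above):
    # n_seqs = 50 with 17 sequences present in dist_mat, so n_unsim_seqs = 33 and
    # ghost_pairs = C(33, 2) + 33*17 = 528 + 561 = 1089. Adding the 121 true
    # negatives among the 17 similar sequences gives the 1210 tn quoted in the
    # opticlust_example() comments.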
@property
def conf_mat(self):
"""
:return: a confusion matrix as a dictionary of counts containing
the keys 'tp', 'tn', 'fp', and 'fn'
"""
# build list of tp, tn, fp, & fn
classes = [
classify(
(seq2 in self.dist_mat[seq1]) or (seq1 in self.dist_mat[seq2]),
self.seqs_to_otus[seq1] == self.seqs_to_otus[seq2],
)
for seq1, seq2 in combinations(self.seqs_to_otus.keys(), 2)
]
# account for additional singleton sequences
classes.extend("tn" for i in range(self.ghost_pairs))
# convert to a dictionary of counts
return Counter(classes)
@property
def mcc(self):
return mcc(self.conf_mat)
@property
def dists_to_array(self):
"""
:param dist_mat: {'A': {'B','C'}, 'B': {'A','C','D'}, 'C': {'A','B'}, 'D': {'B'}}
:return: pd.DataFrame(dist_array,
columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
"""
dist_sets = self.dist_mat
seqs = {seq2 for seq1 in dist_sets for seq2 in dist_sets[seq1]}
seqs.update({seq for seq in dist_sets})
seqs = list(sorted(seqs))
len_seqs = len(seqs)
        dist_array = numpy.zeros((len_seqs, len_seqs))
for i, seqi in enumerate(seqs):
for j, seqj in enumerate(seqs):
if i == j or seqi in dist_sets[seqj] or seqj in dist_sets[seqi]:
dist_array[i][j] = 1
dist_array[j][i] = 1
return pandas.DataFrame(dist_array, columns=seqs, index=seqs)
class OptiFit:
def __init__(self, ref_otus, query_seqs, query_dist_mat, n_seqs=0):
self.ref_otus = ref_otus
self.query_seqs = query_seqs
# create merged dist mat
dist_mat = self.ref_otus.dist_mat.copy()
for s1 in query_dist_mat:
dist_mat[s1].update(query_dist_mat[s1])
for s2 in query_dist_mat[s1]:
dist_mat[s2].add(s1)
# initialize OTUs from reference
seqs_to_otus = self.ref_otus.seqs_to_otus.copy()
        # seed each query sequence as a singleton OTU
n_otus = len(self.ref_otus.otus_to_seqs)
for seq in self.query_seqs:
n_otus += 1
seqs_to_otus[seq] = n_otus
self.fitmap = otuMap(
seqs_to_otus=seqs_to_otus, dist_mat=dist_mat, n_seqs=n_seqs
)
def __repr__(self):
return "%s(%r)" % (self.__class__, self.fitmap)
@property
def mcc(self):
return self.fitmap.mcc
@property
def iterate(self):
iterations = list()
curr_fitmap = self.fitmap
prev_mcc = 0
while not numpy.isclose(prev_mcc, self.mcc):
prev_mcc = self.mcc
for seq in self.query_seqs:
if seq in self.fitmap.dist_mat:
iteration = OptiIter(curr_fitmap, seq, self.query_seqs)
iterations.append(iteration.to_dict)
curr_fitmap = iteration.best_map
        return iterations
@property
def iterate_obj(self):
iterations = list()
curr_fitmap = self.fitmap
prev_mcc = 0
while not numpy.isclose(prev_mcc, self.mcc):
prev_mcc = self.mcc
for seq in self.query_seqs:
if seq in self.fitmap.dist_mat:
iteration = OptiIter(curr_fitmap, seq, self.query_seqs)
iterations.append(iteration)
curr_fitmap = iteration.best_map
return iterations
class OptiIter:
def __init__(self, curr_fitmap, curr_seq, query_seqs):
"""
Calculate possible MCCs if the current seq is moved to different OTUs.
Store the nodes and edges as dictionaries for tidygraph.
"""
sim_seqs = curr_fitmap.dist_mat[curr_seq]
sim_seqs.add(curr_seq)
options = list()
edges = {"from": [], "to": [], "mcc": []}
for sim_seq in sim_seqs:
option = OptiOption(curr_fitmap, curr_seq, sim_seq)
options.append(option)
edges["from"].append(option.from_otu)
edges["to"].append(option.to_otu)
edges["mcc"].append(option.mcc)
self.edges = | pandas.DataFrame.from_dict(edges) | pandas.DataFrame.from_dict |
from PhiRelevance.PhiUtils1 import phiControl,phi
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
from random import seed
from random import randint
from random import random
class SmoteRRegression:
"""
Class SmoteRRegression takes arguments as follows:
data - Pandas data frame with target value as last column, rest columns should be features/attributes
method - "auto"(default, also called "extremes"),"range"
extrType - "high", "both"(default), "low"
    thr_rel - user defined relevance threshold between 0 and 1, all the target values with relevance above
              the threshold are candidates to be oversampled
controlPts - list of control points formatted as [y1, phi(y1), phi'(y1), y2, phi(y2), phi'(y2)], where
y1: target value; phi(y1): relevane value of y1; phi'(y1): derivative of phi(y1), etc.
    c_perc - under- and over-sampling strategy; in this implementation SmoteR interpolation is applied in each bump of the oversampling (interesting) sets,
possible types are defined below,
"balance" - will try to distribute the examples evenly across the existing bumps
"extreme" - invert existing frequency of interesting/uninteresting set
<percentage> - A list of percentage values with the following formats,
for any percentage value < 1, there should be either 1 percentage value applies to all bumps of undersampling set,
or multiple percentage values mapping to each bump of undersampling set;
for any percentage value > 1, there should be either 1 percentage value applies to all bumps of oversampling set
or multiple percentage values mapping to each bump of oversampling set;
k - The number of nearest neighbors, default value is 5
"""
def __init__(self, data, method='auto', extrType='both', thr_rel=1.0, controlPts=[], c_perc="balance", k=5):
seed(1)
self.data = data;
self.method = 'extremes' if method in ['extremes', 'auto'] else 'range'
if self.method == 'extremes':
if extrType in ['high','low','both']:
self.extrType = extrType
else:
self.extrType = 'both'
else:
self.extrType =''
self.thr_rel = thr_rel
if method == 'extremes':
self.controlPts = []
else:
self.controlPts = controlPts
self.c_perc_undersampling = []
self.c_perc_oversampling = []
if str == type(c_perc):
self.c_perc = c_perc if c_perc in ["balance", "extreme"] else c_perc
elif list == type(c_perc):
self.c_perc = 'percentage list'
self.processCPerc(c_perc)
self.k = k
self.coef = 1.5
def processCPerc(self, c_perc):
for x in c_perc:
if x < 1.0:
self.c_perc_undersampling.append(float(x))
elif x > 1.0:
self.c_perc_oversampling.append(float(x))
else:
print('c_perc value in list should not be 1!')
print(f'c_perc_undersampling: {self.c_perc_undersampling}')
print(f'c_perc_oversampling: {self.c_perc_oversampling}')
def getMethod(self):
return self.method
def getData(self):
return self.data
def getExtrType(self):
return self.extrType
def getThrRel(self):
return self.thr_rel
def getControlPtr(self):
return self.controlPts
def getCPerc(self):
if self.c_perc in ['balance', 'extreme']:
return self.c_perc
else:
return self.c_perc_undersampling, self.c_perc_oversampling
def getK(self):
return self.k
def set_obj_interesting_set(self, data):
self.interesting_set = self.get_interesting_set(data)
def get_obj_interesting_set(self):
return self.interesting_set
def set_obj_uninteresting_set(self, data):
self.uninteresting_set = self.get_uninteresting_set(data)
def get_obj_uninteresting_set(self):
return self.uninteresting_set
def set_obj_bumps(self, data):
self.bumps_undersampling, self.bumps_oversampling = self.calc_bumps(data)
def get_obj_bumps(self):
return self.bumps_undersampling, self.bumps_oversampling
def resample(self):
yPhi, ydPhi, yddPhi = self.calc_rel_values()
data1 = self.preprocess_data(yPhi)
#interesting set
self.set_obj_interesting_set(data1)
#uninteresting set
self.set_obj_uninteresting_set(data1)
#calculate bumps
self.set_obj_bumps(data1)
if self.c_perc == 'percentage list':
resampled = self.process_percentage()
elif self.c_perc == 'balance':
resampled = self.process_balance()
elif self.c_perc == 'extreme':
resampled = self.process_extreme()
return resampled
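    # Hypothetical usage sketch (file and column names are assumptions; note that
    # preprocess_data() currently expects the target column to be named 'Tgt'):
    #
    #     df = pd.read_csv("housing.csv")   # features first, target 'Tgt' as last column
    #     sm = SmoteRRegression(df, thr_rel=0.8, c_perc=[0.5, 2.5], k=5)
    #     balanced = sm.resample()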
def preprocess_data(self, yPhi):
#append column 'yPhi'
data1 = self.data
data1['yPhi'] = yPhi
data1 = self.data.sort_values(by=['Tgt'])
return data1
def get_uninteresting_set(self, data):
uninteresting_set = data[data.yPhi < self.thr_rel]
return uninteresting_set
def get_interesting_set(self, data):
interesting_set = data[data.yPhi >= self.thr_rel]
return interesting_set
def calc_rel_values(self):
#retrieve target(last column) from DataFrame
y = self.data.iloc[:,-1]
#generate control ptrs
if self.method == 'extremes':
controlPts, npts = phiControl(y, extrType=self.extrType)
else:
controlPts, npts = phiControl(y, 'range', extrType="", controlPts=self.controlPts)
#calculate relevance value
yPhi, ydPhi, yddPhi = phi(y, controlPts, npts, self.method)
return yPhi, ydPhi, yddPhi
def calc_bumps(self, df):
thr_rel = self.thr_rel
less_than_thr_rel = True if df.loc[0,'yPhi'] < thr_rel else False
bumps_oversampling = []
bumps_undersampling = []
bumps_oversampling_df = pd.DataFrame(columns = df.columns)
bumps_undersampling_df = pd.DataFrame(columns = df.columns)
for idx, row in df.iterrows():
if less_than_thr_rel and (row['yPhi'] < thr_rel):
bumps_undersampling_df = bumps_undersampling_df.append(row)
elif less_than_thr_rel and row['yPhi'] >= thr_rel:
bumps_undersampling.append(bumps_undersampling_df)
bumps_undersampling_df = pd.DataFrame(columns = df.columns)
bumps_oversampling_df = bumps_oversampling_df.append(row)
less_than_thr_rel = False
elif (not less_than_thr_rel) and (row['yPhi'] >= thr_rel):
bumps_oversampling_df = bumps_oversampling_df.append(row)
elif (not less_than_thr_rel) and (row['yPhi'] < thr_rel):
bumps_oversampling.append(bumps_oversampling_df)
bumps_oversampling_df = pd.DataFrame(columns = df.columns)
bumps_undersampling_df = bumps_undersampling_df.append(row)
less_than_thr_rel = True
if less_than_thr_rel and (df.iloc[-1,:]['yPhi'] < thr_rel):
bumps_undersampling.append(bumps_undersampling_df)
elif not less_than_thr_rel and (df.iloc[-1,:]['yPhi'] >= thr_rel):
bumps_oversampling.append(bumps_oversampling_df)
return bumps_undersampling, bumps_oversampling
def process_percentage(self):
undersampling_and_interesting, new_samples_set = self.preprocess_percentage()
reduced_cols = new_samples_set.columns.values.tolist()[:-1]
dups_sample_counts = new_samples_set.pivot_table(index=reduced_cols, aggfunc='size')
interesting_set_list = self.interesting_set.iloc[:,:-1].values.tolist()
#new samples from smote
new_samples_smote = []
for index, value in dups_sample_counts.items():
base_sample = list(index)
#print(f'base_sample={base_sample}')
kNN_result = self.kNN_calc(self.k, base_sample, interesting_set_list)
#Generating new samples
for x in range(value):
                idx = randint(0, len(kNN_result) - 1)
#print(f'x={x},idx={idx}')
nb = kNN_result[idx]
#Generate attribute values
new_sample = []
for y in range(len(base_sample)-1):
diff = abs(base_sample[y]-nb[y])
new_sample.append(base_sample[y]+random()*diff)
#Calc target value
a = np.array(new_sample)
b = np.array(base_sample[:-1])
d1 = np.linalg.norm(a-b)
c = np.array(nb[:-1])
d2 = np.linalg.norm(a-c)
new_target = (d2*base_sample[-1]+d1*nb[-1])/(d1+d2)
new_sample.append(new_target)
#print(f'new_sample={new_sample}')
new_samples_smote.append(new_sample)
print(f'len={len(new_samples_smote)}')
#print(f'{new_samples_smote}')
#Generate final result
undersampling_and_interesting.drop('yPhi',axis=1,inplace=True )
df_new_samples_smote = pd.DataFrame(new_samples_smote)
df_new_samples_smote.columns = reduced_cols
frames = [undersampling_and_interesting, df_new_samples_smote]
result = pd.concat(frames)
return result
def preprocess_percentage(self):
#process undersampling
len_c_perc_undersampling = len(self.c_perc_undersampling)
print(f'len_c_perc_undersampling={len_c_perc_undersampling}')
len_bumps_undersampling = len(self.bumps_undersampling)
print(f'len_bumps_undersampling={len_bumps_undersampling}')
resampled_sets = []
if len_c_perc_undersampling == 0:
print('no undersampling, append uninteresting set directly')
resampled_sets.append(self.uninteresting_set)
elif len_c_perc_undersampling == 1:
undersample_perc = self.c_perc_undersampling[0]
print('len(self.c_perc) == 1')
print(f'process_percentage(): undersample_perc={undersample_perc}')
#iterate undersampling bumps to apply undersampling percentage
for s in self.bumps_undersampling:
print(f'process_percentage(): bump size={len(s)}')
resample_size = round(len(s)*undersample_perc)
print(f'process_percentage(): resample_size={resample_size}')
resampled_sets.append(s.sample(n = resample_size))
elif len_c_perc_undersampling == len_bumps_undersampling:
for i in range(len(self.bumps_undersampling)):
print(f'len(self.c_perc) > 1 loop i={i}')
undersample_perc = self.c_perc_undersampling[i]
print(f'process_percentage(): undersample_perc={undersample_perc}')
resample_size = round(len(self.bumps_undersampling[i])*undersample_perc)
print(f'process_percentage(): resample_size={resample_size}')
resampled_sets.append(self.bumps_undersampling[i].sample(n = resample_size))
else:
print(f'length of c_perc for undersampling {len_c_perc_undersampling} != length of bumps undersampling {len_bumps_undersampling}')
#uninteresting bumps are now stored in list resampled_sets
#also adding original interesting set
resampled_sets.append(self.interesting_set)
#Oversampling with SmoteR
len_c_perc_oversampling = len(self.c_perc_oversampling)
print(f'len_c_perc_oversampling={len_c_perc_oversampling}')
len_bumps_oversampling = len(self.bumps_oversampling)
print(f'len_bumps_oversampling={len_bumps_oversampling}')
resampled_oversampling_set = []
if len(self.c_perc_oversampling) == 1:
#oversampling - new samples set
c_perc_frac, c_perc_int = 0.0, 0.0
for s in self.bumps_oversampling:
# size of the new samples
print(f'c_perc_oversampling[0]={self.c_perc_oversampling[0]}')
if self.c_perc_oversampling[0]>1.0 and self.c_perc_oversampling[0]<2.0:
size_new_samples_set = round(len(s)*(self.c_perc_oversampling[0]-1))
print(f'size_new_samples_set={size_new_samples_set}')
resampled_oversampling_set.append(s.sample(n = size_new_samples_set))
elif self.c_perc_oversampling[0]>2.0:
c_perc_frac, c_perc_int = math.modf(self.c_perc_oversampling[0])
print(f'c_perc_int, c_perc_frac =={c_perc_int, c_perc_frac}')
if c_perc_frac > 0.0:
size_frac_new_samples_set = round(len(s)*c_perc_frac)
resampled_oversampling_set.append(s.sample(n=size_frac_new_samples_set))
ss = s.loc[s.index.repeat(int(c_perc_int)-1)]
resampled_oversampling_set.append(ss)
elif len_c_perc_oversampling == len_bumps_oversampling:
for i in range(len(self.bumps_oversampling)):
print(f'len(self.c_perc) > 1 loop i={i}')
c_perc_bump = self.c_perc_oversampling[i]
print(f'process_percentage(): undersample_perc={c_perc_bump}')
if c_perc_bump>1.0 and c_perc_bump<2.0:
size_new_samples_set = round(len(s)*(c_perc_bump-1))
print(f'size_new_samples_set={size_new_samples_set}')
resampled_oversampling_set.append(s.sample(n = size_new_samples_set))
elif c_perc_bump>2.0:
c_perc_frac, c_perc_int = math.modf(self.c_perc_oversampling[0])
print(f'c_perc_int, c_perc_frac =={c_perc_int, c_perc_frac}')
if c_perc_frac>0.0:
size_frac_new_samples_set = round(len(self.bumps_oversampling[i])*c_perc_frac)
resampled_oversampling_set.append(self.bumps_oversampling[i].sample(n=size_frac_new_samples_set))
ss = self.bumps_oversampling[i].loc[self.bumps_oversampling[i].index.repeat(int(c_perc_int)-1)]
resampled_oversampling_set.append(ss)
else:
print(f'length of c_perc for oversampling {len_c_perc_oversampling} != length of bumps oversampling {len_bumps_oversampling}')
#Combining all undersampling sets and interesting set
undersampling_and_interesting = pd.concat(resampled_sets)
#Combining all new samples
new_samples_set = pd.concat(resampled_oversampling_set)
return undersampling_and_interesting, new_samples_set
def kNN_calc(self, k, sample_as_list, interesting_set_list):
a = np.array(sample_as_list[:-1])
for sample_interesting in interesting_set_list:
b = np.array(sample_interesting[:-1])
dist = np.linalg.norm(a-b)
sample_interesting.append(dist)
kNN_result = sorted(interesting_set_list, key=lambda x:x[-1])[1:(k+1)]
for j in interesting_set_list:
del j[-1]
return kNN_result
def process_balance(self):
new_samples_set = self.preprocess_balance()
reduced_cols = new_samples_set.columns.values.tolist()[:-1]
dups_sample_counts = new_samples_set.pivot_table(index=reduced_cols, aggfunc='size')
interesting_set_list = self.interesting_set.iloc[:,:-1].values.tolist()
#new samples from smote
new_samples_smote = []
for index, value in dups_sample_counts.items():
base_sample = list(index)
#print(f'base_sample={base_sample}')
kNN_result = self.kNN_calc(self.k, base_sample, interesting_set_list)
#Generating new samples
for x in range(value):
                idx = randint(0, len(kNN_result) - 1)
#print(f'x={x},idx={idx}')
nb = kNN_result[idx]
#Generate attribute values
new_sample = []
for y in range(len(base_sample)-1):
diff = abs(base_sample[y]-nb[y])
new_sample.append(base_sample[y]+random()*diff)
#Calc target value
a = np.array(new_sample)
b = np.array(base_sample[:-1])
d1 = np.linalg.norm(a-b)
c = np.array(nb[:-1])
d2 = np.linalg.norm(a-c)
new_target = (d2*base_sample[-1]+d1*nb[-1])/(d1+d2)
new_sample.append(new_target)
#print(f'new_sample={new_sample}')
new_samples_smote.append(new_sample)
print(f'len={len(new_samples_smote)}')
#print(f'{new_samples_smote}')
#Generate final result
data = self.getData()
data.drop('yPhi',axis=1,inplace=True )
df_new_samples_smote = pd.DataFrame(new_samples_smote)
df_new_samples_smote.columns = reduced_cols
frames = [data, df_new_samples_smote]
result = | pd.concat(frames) | pandas.concat |
import os
import pandas as pd
import numpy as np
import untangle
import requests
import scripts.manipulation as manipulation
mais_path = "../../bd+/mais_projects/data/alesp"
def parse_deputados(download=True): # sourcery no-metrics
if download:
r = requests.get(
"https://www.al.sp.gov.br/repositorioDados/deputados/deputados.xml"
)
obj = untangle.parse(r.text)
cols = [
"idDeputado",
"nomeParlamentar",
"aniversario",
"partido",
"situacao",
"email",
"sala",
"placaVeiculo",
"biografia",
"homePage",
"andar",
"fax",
"matricula",
"IdSPL",
]
l = len(obj.Deputados.Deputado)
for i in range(l):
line = []
# print(i)
a = obj.Deputados.Deputado[i].IdDeputado.cdata
b = obj.Deputados.Deputado[i].NomeParlamentar.cdata
c = obj.Deputados.Deputado[i].Aniversario.cdata
try:
e = obj.Deputados.Deputado[i].Partido.cdata
except:
e = np.nan
# i = obj.Deputados.Deputado[i].Telefone
f = obj.Deputados.Deputado[i].Situacao.cdata
g = obj.Deputados.Deputado[i].Email.cdata
try:
h = obj.Deputados.Deputado[i].Sala.cdata
except:
h = np.nan
# i = obj.Deputados.Deputado[i].Telefone
try:
j = obj.Deputados.Deputado[i].PlacaVeiculo.cdata
except:
j = np.nan
k = obj.Deputados.Deputado[i].Biografia.cdata
try:
l = obj.Deputados.Deputado[i].HomePage.cdata
except:
l = np.nan
try:
m = obj.Deputados.Deputado[i].Andar.cdata
except:
m = np.nan
try:
n = obj.Deputados.Deputado[i].Fax.cdata
except:
n = np.nan
o = obj.Deputados.Deputado[i].Matricula.cdata
p = obj.Deputados.Deputado[i].IdSPL.cdata
line = [a, b, c, e, f, g, h, j, k, l, m, n, o, p]
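        # the single-letter temporaries above map one-to-one onto `cols`;
        # `d` and `i` are skipped (`i` is the loop index and Telefone is intentionally not extracted)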
df = pd.DataFrame([line], columns=cols)
if i == 0:
df.to_csv(
"../data/servidores/deputados_alesp_aux.csv",
index=False,
encoding="utf-8",
)
else:
df.to_csv(
"../data/servidores/deputados_alesp_aux.csv",
index=False,
encoding="utf-8",
header=False,
mode="a",
)
deputados = | pd.read_csv("../data/servidores/deputados_alesp_aux.csv") | pandas.read_csv |
"""
Tests for zipline/utils/pandas_utils.py
"""
from unittest import skipIf
import pandas as pd
from zipline.testing import parameter_space, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.pandas_utils import (
categorical_df_concat,
nearest_unequal_elements,
new_pandas,
skip_pipeline_new_pandas,
)
class TestNearestUnequalElements(ZiplineTestCase):
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements(self, tz):
dts = pd.to_datetime(
['2014-01-01', '2014-01-05', '2014-01-06', '2014-01-09'],
).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-30', None, '2014-01-01'),
('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, '2014-01-05'),
('2014-01-02', '2014-01-01', '2014-01-05'),
('2014-01-03', '2014-01-01', '2014-01-05'),
('2014-01-04', '2014-01-01', '2014-01-05'),
('2014-01-05', '2014-01-01', '2014-01-06'),
('2014-01-06', '2014-01-05', '2014-01-09'),
('2014-01-07', '2014-01-06', '2014-01-09'),
('2014-01-08', '2014-01-06', '2014-01-09'),
('2014-01-09', '2014-01-06', None),
('2014-01-10', '2014-01-09', None),
('2014-01-11', '2014-01-09', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements_short_dts(self, tz):
# Length 1.
dts = pd.to_datetime(['2014-01-01']).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, None),
('2014-01-02', '2014-01-01', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
# Length 0
dts = pd.to_datetime([]).tz_localize(tz)
for dt, before, after in (('2013-12-31', None, None),
('2014-01-01', None, None),
('2014-01-02', None, None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
def test_nearest_unequal_bad_input(self):
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2014']),
pd.Timestamp('2014'),
)
self.assertEqual(str(e.exception), 'dts must be unique')
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2013']),
pd.Timestamp('2014'),
)
self.assertEqual(
str(e.exception),
'dts must be sorted in increasing order',
)
class TestCatDFConcat(ZiplineTestCase):
@skipIf(new_pandas, skip_pipeline_new_pandas)
def test_categorical_df_concat(self):
inp = [
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'c'], dtype='category'),
'B': pd.Series([100, 102, 103], dtype='int64'),
'C': pd.Series(['x', 'x', 'x'], dtype='category'),
}
),
pd.DataFrame(
{
'A': pd.Series(['c', 'b', 'd'], dtype='category'),
'B': pd.Series([103, 102, 104], dtype='int64'),
'C': pd.Series(['y', 'y', 'y'], dtype='category'),
}
),
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'd'], dtype='category'),
'B': pd.Series([101, 102, 104], dtype='int64'),
'C': pd.Series(['z', 'z', 'z'], dtype='category'),
}
),
]
result = categorical_df_concat(inp)
expected = pd.DataFrame(
{
'A': pd.Series(
['a', 'b', 'c', 'c', 'b', 'd', 'a', 'b', 'd'],
dtype='category'
),
'B': pd.Series(
[100, 102, 103, 103, 102, 104, 101, 102, 104],
dtype='int64'
),
'C': pd.Series(
['x', 'x', 'x', 'y', 'y', 'y', 'z', 'z', 'z'],
dtype='category'
),
},
)
expected.index = pd.Int64Index([0, 1, 2, 0, 1, 2, 0, 1, 2])
assert_equal(expected, result)
assert_equal(
expected['A'].cat.categories,
result['A'].cat.categories
)
assert_equal(
expected['C'].cat.categories,
result['C'].cat.categories
)
def test_categorical_df_concat_value_error(self):
mismatched_dtypes = [
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'c'], dtype='category'),
'B': | pd.Series([100, 102, 103], dtype='int64') | pandas.Series |
# This file is generated from image_classification/dataset.md automatically through:
# d2lbook build lib
# Don't edit it directly
#@save_all
#@hide_all
import pathlib
import pandas as pd
from matplotlib import pyplot as plt
from typing import Union, Sequence, Callable, Optional
import fnmatch
import numpy as np
import unittest
from d8 import core
class Dataset(core.BaseDataset):
"""The class of an image classification dataset."""
def __init__(self, df: pd.DataFrame, reader: core.Reader):
super().__init__(df, reader, label_name='class_name')
TYPE = 'image_classification'
def show(self, layout=(2,8)) -> None:
"""Show several random examples with their labels.
:param layout: A tuple of (number of rows, number of columns).
"""
nrows, ncols = layout
max_width=300
scale = 14 / ncols
figsize = (ncols * scale, nrows * scale)
_, axes = plt.subplots(nrows, ncols, figsize=figsize)
samples = self.df.sample(n=nrows*ncols, random_state=0)
for ax, (_, sample) in zip(axes.flatten(), samples.iterrows()):
ax.set_title(sample['class_name'])
img = self.reader.read_image(sample['file_path'], max_width=max_width)
ax.imshow(img)
ax.axis("off")
def _summary(self) -> pd.DataFrame:
"""Returns a summary about this dataset."""
get_mean_std = lambda col: f'{col.mean():.1f} ± {col.std():.1f}'
img_df = self.reader.get_image_info(self.df['file_path'])
return pd.DataFrame([{'# images':len(img_df),
'# classes':len(self.classes),
'image width':get_mean_std(img_df['width']),
'image height':get_mean_std(img_df['height']),
'size (GB)':img_df['size (KB)'].sum()/2**20,}])
def __getitem__(self, idx):
        if idx < 0 or idx >= self.__len__():
raise IndexError(f'index {idx} out of range [0, {self.__len__()})')
file_path = self.df['file_path'][idx]
img = self.reader.read_image(file_path)
return np.array(img), self.df['class_name'][idx]
def to_mxnet(self):
"""Returns a MXNet dataset instance"""
import mxnet as mx
class MXDataset(mx.gluon.data.Dataset):
def __init__(self, dataset):
self.data = dataset
self.label_to_idx = {n:i for i, n in enumerate(self.data.classes)}
self.classes = dataset.classes
def __getitem__(self, idx):
file_path = self.data.df['file_path'][idx]
img = self.data.reader.read_image(file_path)
img = mx.nd.array(img)
label = self.label_to_idx[self.data.df['class_name'][idx]]
return img, label
def __len__(self):
return len(self.data.df)
return MXDataset(self)
@classmethod
def from_folders(cls, data_path: Union[str, Sequence[str]],
folders: Union[str, Sequence[str]]) -> 'Dataset':
"""Create a dataset when images from the same class are stored in the same folder.
:param data_path: Either a URL or a local path. For the former, data will be downloaded automatically.
:param folders: The folders containing all example images.
:return: The created dataset.
"""
if isinstance(folders, (str, pathlib.Path)): folders = [folders]
def label_func(file_path):
for folder in folders:
if fnmatch.fnmatch(str(file_path.parent.parent), folder):
return file_path.parent.name
return None
return cls.from_label_func(data_path, label_func)
@classmethod
def from_label_func(cls, data_path: Union[str, Sequence[str]],
label_func: Callable[[pathlib.Path], str]) -> 'Dataset':
"""Create a dataset from a function that maps a image path to its class name.
:param data_path: Either a URL or a local path. For the former, data will be downloaded automatically.
:param label_func: A function takes an image path (an instance :class:`pathlib.Path`) to return a string class name or a None to skip this image.
:return: The created dataset.
:param data_path:
"""
reader = core.create_reader(data_path)
entries = []
for file_path in reader.list_images():
lbl = label_func(file_path)
if lbl: entries.append({'file_path':file_path, 'class_name':lbl})
df = | pd.DataFrame(entries) | pandas.DataFrame |
import asyncio
import concurrent.futures
import itertools
import logging
import math
import random
import sys
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
import scipy
import toolz
from dask.distributed import Future
from distributed.utils_test import ( # noqa: F401
captured_logger,
cluster,
gen_cluster,
loop,
)
from scipy.stats import uniform
from sklearn.base import clone
from sklearn.cluster import MiniBatchKMeans
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import ParameterGrid, ParameterSampler
from sklearn.utils import check_random_state
from dask_ml._compat import DISTRIBUTED_2_5_0
from dask_ml.datasets import make_classification
from dask_ml.model_selection import (
HyperbandSearchCV,
IncrementalSearchCV,
InverseDecaySearchCV,
)
from dask_ml.model_selection._incremental import _partial_fit, _score, fit
from dask_ml.model_selection.utils_test import LinearFunction, _MaybeLinearFunction
from dask_ml.utils import ConstantFunction
pytestmark = [
pytest.mark.skipif(not DISTRIBUTED_2_5_0, reason="hangs"),
pytest.mark.filterwarnings("ignore:decay_rate"),
] # decay_rate warnings are tested in test_incremental_warns.py
@gen_cluster(client=True, timeout=1000)
async def test_basic(c, s, a, b):
def _additional_calls(info):
pf_calls = {k: v[-1]["partial_fit_calls"] for k, v in info.items()}
ret = {k: int(calls < 10) for k, calls in pf_calls.items()}
if len(ret) == 1:
return {list(ret)[0]: 0}
# Don't train one model (but keep model 0)
some_keys = set(ret.keys()) - {0}
key_to_drop = random.choice(list(some_keys))
return {k: v for k, v in ret.items() if k != key_to_drop}
X, y = make_classification(n_samples=1000, n_features=5, chunks=100)
model = ConstantFunction()
params = {"value": uniform(0, 1)}
X_test, y_test = X[:100], y[:100]
X_train = X[100:]
y_train = y[100:]
n_parameters = 5
param_list = list(ParameterSampler(params, n_parameters))
info, models, history, best = await fit(
model,
param_list,
X_train,
y_train,
X_test,
y_test,
_additional_calls,
fit_params={"classes": [0, 1]},
)
# Ensure that we touched all data
keys = {t[0] for t in s.transition_log}
L = [str(k) in keys for kk in X_train.__dask_keys__() for k in kk]
assert all(L)
for model in models.values():
assert isinstance(model, Future)
model2 = await model
assert isinstance(model2, ConstantFunction)
XX_test = await c.compute(X_test)
yy_test = await c.compute(y_test)
model = await models[0]
assert model.score(XX_test, yy_test) == info[0][-1]["score"]
# `<` not `==` because we randomly dropped one model every iteration
assert len(history) < n_parameters * 10
for h in history:
assert {
"partial_fit_time",
"score_time",
"score",
"model_id",
"params",
"partial_fit_calls",
}.issubset(set(h.keys()))
groups = toolz.groupby("partial_fit_calls", history)
assert len(groups[1]) > len(groups[2]) > len(groups[3]) > len(groups[max(groups)])
assert max(groups) == n_parameters
keys = list(models.keys())
for key in keys:
del models[key]
while c.futures or s.tasks: # Make sure cleans up cleanly after running
await asyncio.sleep(0.1)
# smoke test for ndarray X_test and y_test
X_test = await c.compute(X_test)
y_test = await c.compute(y_test)
info, models, history, best = await fit(
model,
param_list,
X_train,
y_train,
X_test,
y_test,
_additional_calls,
fit_params={"classes": [0, 1]},
)
assert True # smoke test to make sure reached
def test_partial_fit_doesnt_mutate_inputs():
n, d = 100, 20
X, y = make_classification(
n_samples=n, n_features=d, random_state=42, chunks=(n, d)
)
X = X.compute()
y = y.compute()
meta = {
"iterations": 0,
"mean_copy_time": 0,
"mean_fit_time": 0,
"partial_fit_calls": 0,
}
model = SGDClassifier(tol=1e-3)
model.partial_fit(X[: n // 2], y[: n // 2], classes=np.unique(y))
new_model, new_meta = _partial_fit(
(model, meta), X[n // 2 :], y[n // 2 :], fit_params={"classes": np.unique(y)}
)
assert meta != new_meta
assert new_meta["partial_fit_calls"] == 1
assert not np.allclose(model.coef_, new_model.coef_)
assert model.t_ < new_model.t_
assert new_meta["partial_fit_time"] >= 0
new_meta2 = _score((model, new_meta), X[n // 2 :], y[n // 2 :], None)
assert new_meta2["score_time"] >= 0
assert new_meta2 != new_meta
@gen_cluster(client=True)
async def test_explicit(c, s, a, b):
X, y = make_classification(n_samples=1000, n_features=10, chunks=(200, 10))
model = SGDClassifier(tol=1e-3, penalty="elasticnet")
params = [{"alpha": 0.1}, {"alpha": 0.2}]
def additional_calls(scores):
"""Progress through predefined updates, checking along the way"""
ts = scores[0][-1]["partial_fit_calls"]
ts -= 1 # partial_fit_calls = time step + 1
if ts == 0:
assert len(scores) == len(params)
assert len(scores[0]) == 1
assert len(scores[1]) == 1
return {k: 2 for k in scores}
if ts == 2:
assert len(scores) == len(params)
assert len(scores[0]) == 2
assert len(scores[1]) == 2
return {0: 1, 1: 0}
elif ts == 3:
assert len(scores) == len(params)
assert len(scores[0]) == 3
assert len(scores[1]) == 2
return {0: 3}
elif ts == 6:
assert len(scores) == 1
assert len(scores[0]) == 4
return {0: 0}
else:
raise Exception()
info, models, history, best = await fit(
model,
params,
X,
y,
X.blocks[-1],
y.blocks[-1],
additional_calls,
scorer=None,
fit_params={"classes": [0, 1]},
)
assert all(model.done() for model in models.values())
models = await c.compute(models)
model = models[0]
meta = info[0][-1]
assert meta["params"] == {"alpha": 0.1}
assert meta["partial_fit_calls"] == 6 + 1
assert len(info) > len(models) == 1
assert set(models.keys()).issubset(set(info.keys()))
assert meta["partial_fit_calls"] == history[-1]["partial_fit_calls"]
calls = {k: [h["partial_fit_calls"] for h in hist] for k, hist in info.items()}
for k, call in calls.items():
assert (np.diff(call) >= 1).all()
assert set(models.keys()) == {0}
del models[0]
while s.tasks or c.futures: # all data clears out
await asyncio.sleep(0.1)
@gen_cluster(client=True)
async def test_search_basic(c, s, a, b):
for decay_rate, input_type, memory in itertools.product(
{0, 1}, ["array", "dataframe"], ["distributed"]
):
success = await _test_search_basic(decay_rate, input_type, memory, c, s, a, b)
assert isinstance(success, bool) and success, "Did the test run?"
async def _test_search_basic(decay_rate, input_type, memory, c, s, a, b):
X, y = make_classification(n_samples=1000, n_features=5, chunks=(100, 5))
assert isinstance(X, da.Array)
if memory == "distributed" and input_type == "dataframe":
X = dd.from_array(X)
y = dd.from_array(y)
assert isinstance(X, dd.DataFrame)
elif memory == "local":
X, y = await c.compute([X, y])
assert isinstance(X, np.ndarray)
if input_type == "dataframe":
X, y = pd.DataFrame(X), pd.DataFrame(y)
assert isinstance(X, pd.DataFrame)
model = SGDClassifier(tol=1e-3, loss="log", penalty="elasticnet")
params = {"alpha": np.logspace(-2, 2, 100), "l1_ratio": np.linspace(0.01, 1, 200)}
kwargs = dict(n_initial_parameters=20, max_iter=10)
if decay_rate == 0:
search = IncrementalSearchCV(model, params, **kwargs)
elif decay_rate == 1:
search = InverseDecaySearchCV(model, params, **kwargs)
else:
raise ValueError()
await search.fit(X, y, classes=[0, 1])
assert search.history_
for d in search.history_:
assert d["partial_fit_calls"] <= search.max_iter + 1
assert isinstance(search.best_estimator_, SGDClassifier)
assert search.best_score_ > 0
assert "visualize" not in search.__dict__
assert search.best_params_
assert search.cv_results_ and isinstance(search.cv_results_, dict)
assert {
"mean_partial_fit_time",
"mean_score_time",
"std_partial_fit_time",
"std_score_time",
"test_score",
"rank_test_score",
"model_id",
"params",
"partial_fit_calls",
"param_alpha",
"param_l1_ratio",
}.issubset(set(search.cv_results_.keys()))
assert len(search.cv_results_["param_alpha"]) == 20
assert all(isinstance(v, np.ndarray) for v in search.cv_results_.values())
if decay_rate == 0:
assert (
search.cv_results_["test_score"][search.best_index_]
>= search.cv_results_["test_score"]
).all()
assert search.cv_results_["rank_test_score"][search.best_index_] == 1
else:
assert all(search.cv_results_["test_score"] >= 0)
assert all(search.cv_results_["rank_test_score"] >= 1)
assert all(search.cv_results_["partial_fit_calls"] >= 1)
assert len(np.unique(search.cv_results_["model_id"])) == len(
search.cv_results_["model_id"]
)
assert sorted(search.model_history_.keys()) == list(range(20))
assert set(search.model_history_[0][0].keys()) == {
"model_id",
"params",
"partial_fit_calls",
"partial_fit_time",
"score",
"score_time",
"elapsed_wall_time",
}
# Dask Objects are lazy
X_ = await c.compute(X)
proba = search.predict_proba(X)
log_proba = search.predict_log_proba(X)
assert proba.shape[1] == 2
assert proba.shape[0] == 1000 or math.isnan(proba.shape[0])
assert log_proba.shape[1] == 2
assert log_proba.shape[0] == 1000 or math.isnan(proba.shape[0])
assert isinstance(proba, da.Array)
assert isinstance(log_proba, da.Array)
proba_ = search.predict_proba(X_)
log_proba_ = search.predict_log_proba(X_)
da.utils.assert_eq(proba, proba_)
da.utils.assert_eq(log_proba, log_proba_)
decision = search.decision_function(X_)
assert decision.shape == (1000,) or math.isnan(decision.shape[0])
return True
@gen_cluster(client=True)
async def test_search_plateau_patience(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
class ConstantClassifier(SGDClassifier):
def __init__(self, value=0):
self.value = value
super(ConstantClassifier, self).__init__(tol=1e-3)
def score(self, *args, **kwargs):
return self.value
params = {"value": np.random.rand(10)}
model = ConstantClassifier()
search = IncrementalSearchCV(
model, params, n_initial_parameters=10, patience=5, tol=0, max_iter=10,
)
await search.fit(X, y, classes=[0, 1])
assert search.history_
assert pd.DataFrame(search.history_).partial_fit_calls.max() <= 5
assert isinstance(search.best_estimator_, SGDClassifier)
assert search.best_score_ == params["value"].max() == search.best_estimator_.value
assert "visualize" not in search.__dict__
assert search.best_score_ > 0
futures = c.compute([X, y])
X_test, y_test = await c.gather(futures)
search.predict(X_test)
search.score(X_test, y_test)
@gen_cluster(client=True)
async def test_search_plateau_tol(c, s, a, b):
model = LinearFunction(slope=1)
params = {"foo": np.linspace(0, 1)}
# every 3 calls, score will increase by 3. tol=1: model did improved enough
search = IncrementalSearchCV(model, params, patience=3, tol=1, max_iter=10)
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
await search.fit(X, y)
assert set(search.cv_results_["partial_fit_calls"]) == {10}
# Every 3 calls, score increases by 3. tol=4: model didn't improve enough
search = IncrementalSearchCV(model, params, patience=3, tol=4, max_iter=10)
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
await search.fit(X, y)
assert set(search.cv_results_["partial_fit_calls"]) == {3}
@gen_cluster(client=True)
async def test_search_max_iter(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
model = SGDClassifier(tol=1e-3, penalty="elasticnet")
params = {"alpha": np.logspace(-2, 10, 10), "l1_ratio": np.linspace(0.01, 1, 20)}
search = IncrementalSearchCV(model, params, n_initial_parameters=10, max_iter=1)
await search.fit(X, y, classes=[0, 1])
for d in search.history_:
assert d["partial_fit_calls"] <= 1
@pytest.mark.xfail(
sys.platform == "win32",
reason="https://github.com/dask/dask-ml/issues/673",
strict=False,
)
@gen_cluster(client=True)
async def test_gridsearch(c, s, a, b):
async def test_gridsearch_func(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
model = SGDClassifier(tol=1e-3)
params = {"alpha": np.logspace(-2, 10, 3), "l1_ratio": np.linspace(0.01, 1, 2)}
search = IncrementalSearchCV(model, params, n_initial_parameters="grid")
await search.fit(X, y, classes=[0, 1])
assert {frozenset(d["params"].items()) for d in search.history_} == {
frozenset(d.items()) for d in ParameterGrid(params)
}
try:
await test_gridsearch_func(c, s, a, b)
except concurrent.futures.TimeoutError:
pytest.xfail(reason="https://github.com/dask/dask-ml/issues/673")
@gen_cluster(client=True)
async def test_numpy_array(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
res = c.compute([X, y])
X, y = await c.gather(res)
model = SGDClassifier(tol=1e-3, penalty="elasticnet")
params = {
"alpha": np.logspace(-5, -3, 10),
"l1_ratio": np.linspace(0, 1, 20),
}
search = IncrementalSearchCV(model, params, n_initial_parameters=10, max_iter=10)
await search.fit(X, y, classes=[0, 1])
# smoke test to ensure search completed successfully
assert search.best_score_ > 0
@gen_cluster(client=True)
async def test_transform(c, s, a, b):
async def test_transform_func(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
model = MiniBatchKMeans(random_state=0)
params = {"n_clusters": [3, 4, 5], "n_init": [1, 2]}
search = IncrementalSearchCV(model, params, n_initial_parameters="grid")
await search.fit(X, y)
X_ = await c.compute(X)
result = search.transform(X_)
assert result.shape == (100, search.best_estimator_.n_clusters)
try:
await test_transform_func(c, s, a, b)
except concurrent.futures.TimeoutError:
pytest.xfail(reason="https://github.com/dask/dask-ml/issues/673")
@gen_cluster(client=True)
async def test_small(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
model = SGDClassifier(tol=1e-3, penalty="elasticnet")
params = {"alpha": [0.1, 0.5, 0.75, 1.0]}
search = IncrementalSearchCV(model, params, n_initial_parameters="grid")
await search.fit(X, y, classes=[0, 1])
X_ = await c.compute(X)
search.predict(X_)
@gen_cluster(client=True)
async def test_smaller(c, s, a, b):
# infinite loop
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
model = SGDClassifier(tol=1e-3, penalty="elasticnet")
params = {"alpha": [0.1, 0.5]}
search = IncrementalSearchCV(model, params, n_initial_parameters="grid")
await search.fit(X, y, classes=[0, 1])
X_ = await c.compute(X)
search.predict(X_)
def _remove_worst_performing_model(info):
calls = {v[-1]["partial_fit_calls"] for v in info.values()}
ests = {v[-1]["params"]["final_score"] for v in info.values()}
if max(calls) == 1:
assert all(x in ests for x in [1, 2, 3, 4, 5])
elif max(calls) == 2:
assert all(x in ests for x in [2, 3, 4, 5])
assert all(x not in ests for x in [1])
elif max(calls) == 3:
assert all(x in ests for x in [3, 4, 5])
assert all(x not in ests for x in [1, 2])
elif max(calls) == 4:
assert all(x in ests for x in [4, 5])
assert all(x not in ests for x in [1, 2, 3])
elif max(calls) == 5:
assert all(x in ests for x in [5])
assert all(x not in ests for x in [1, 2, 3, 4])
return {k: 0 for k in info.keys()}
recent_scores = {
k: v[-1]["score"]
for k, v in info.items()
if v[-1]["partial_fit_calls"] == max(calls)
}
return {k: 1 for k, v in recent_scores.items() if v > min(recent_scores.values())}
@gen_cluster(client=True)
async def test_high_performing_models_are_retained_with_patience(c, s, a, b):
"""
This tests covers a case when high performing models plateau before the
search is finished.
This covers the use case when one poor-performing model takes a long time
to converge, but all other high-performing models have finished (and
plateaued).
Details
-------
This test defines
* low performing models that continue to improve
* high performing models that are constant
It uses a small tolerance to stop the constant (and high-performing) models.
This test is only concerned with making sure the high-performing model is
retained after it has reached a plateau. It is not concerned with making
sure models are killed off at correct times.
"""
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
params = {"final_score": [1, 2, 3, 4, 5]}
search = IncrementalSearchCV(
_MaybeLinearFunction(),
params,
patience=2,
tol=1e-3, # only stop the constant functions
n_initial_parameters="grid",
max_iter=20,
)
search._adapt = _remove_worst_performing_model
await search.fit(X, y)
assert search.best_params_ == {"final_score": 5}
@gen_cluster(client=True)
async def test_same_params_with_random_state(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=10, chunks=10, random_state=0)
model = SGDClassifier(tol=1e-3, penalty="elasticnet", random_state=1)
params = {"alpha": scipy.stats.uniform(1e-4, 1)}
# Use InverseDecaySearchCV to decay the models and make sure the same ones
# are selected
kwargs = dict(n_initial_parameters=10, random_state=2)
search1 = InverseDecaySearchCV(clone(model), params, **kwargs)
await search1.fit(X, y, classes=[0, 1])
params1 = search1.cv_results_["param_alpha"]
search2 = InverseDecaySearchCV(clone(model), params, **kwargs)
await search2.fit(X, y, classes=[0, 1])
params2 = search2.cv_results_["param_alpha"]
assert np.allclose(params1, params2)
@gen_cluster(client=True)
async def test_model_random_determinism(c, s, a, b):
# choose so d == n//10. Then each partial_fit call is very
# unstable, so models will vary a lot.
n, d = 50, 5
X, y = make_classification(
n_samples=n, n_features=d, chunks=n // 10, random_state=0
)
params = {
"loss": ["hinge", "log", "modified_huber", "squared_hinge", "perceptron"],
"average": [True, False],
"learning_rate": ["constant", "invscaling", "optimal"],
"eta0": np.logspace(-2, 0, num=1000),
}
model = SGDClassifier(random_state=1)
kwargs = dict(n_initial_parameters=10, random_state=2, max_iter=10)
search1 = InverseDecaySearchCV(model, params, **kwargs)
await search1.fit(X, y, classes=[0, 1])
search2 = InverseDecaySearchCV(clone(model), params, **kwargs)
await search2.fit(X, y, classes=[0, 1])
assert search1.best_score_ == search2.best_score_
assert search1.best_params_ == search2.best_params_
assert np.allclose(search1.best_estimator_.coef_, search2.best_estimator_.coef_)
@gen_cluster(client=True)
async def test_min_max_iter(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
est = SGDClassifier()
params = {"alpha": np.logspace(-3, 0)}
search = IncrementalSearchCV(est, params, max_iter=0)
with pytest.raises(ValueError, match="max_iter < 1 is not supported"):
await search.fit(X, y, classes=[0, 1])
@gen_cluster(client=True)
async def test_history(c, s, a, b):
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
alg = IncrementalSearchCV(model, params, max_iter=9, random_state=42)
await alg.fit(X, y)
gt_zero = lambda x: x >= 0
gt_one = lambda x: x >= 1
key_types_and_checks = [
("mean_partial_fit_time", float, gt_zero),
("mean_score_time", float, gt_zero),
("std_partial_fit_time", float, gt_zero),
("std_score_time", float, gt_zero),
("test_score", float, gt_zero),
("rank_test_score", int, gt_one),
("model_id", int, None),
("partial_fit_calls", int, gt_zero),
("params", dict, lambda d: set(d.keys()) == {"value"}),
("param_value", float, gt_zero),
]
assert set(alg.cv_results_) == {v[0] for v in key_types_and_checks}
for column, dtype, condition in key_types_and_checks:
if dtype:
assert alg.cv_results_[column].dtype == dtype
if condition:
assert all(condition(x) for x in alg.cv_results_[column])
alg.best_estimator_.fit(X, y)
alg.best_estimator_.score(X, y)
alg.score(X, y)
# Test types/format of all parameters we set after fitting
assert isinstance(alg.best_index_, int)
assert isinstance(alg.best_estimator_, ConstantFunction)
assert isinstance(alg.best_score_, float)
assert isinstance(alg.best_params_, dict)
assert isinstance(alg.history_, list)
assert all(isinstance(h, dict) for h in alg.history_)
assert isinstance(alg.model_history_, dict)
assert all(vi in alg.history_ for v in alg.model_history_.values() for vi in v)
assert all(isinstance(v, np.ndarray) for v in alg.cv_results_.values())
assert isinstance(alg.multimetric_, bool)
keys = {
"score",
"score_time",
"partial_fit_calls",
"partial_fit_time",
"model_id",
"elapsed_wall_time",
"params",
}
assert all(set(h.keys()) == keys for h in alg.history_)
times = [v["elapsed_wall_time"] for v in alg.history_]
assert (np.diff(times) >= 0).all()
# Test to make sure history_ ordered with wall time
assert (np.diff([v["elapsed_wall_time"] for v in alg.history_]) >= 0).all()
for model_hist in alg.model_history_.values():
calls = [h["partial_fit_calls"] for h in model_hist]
assert (np.diff(calls) >= 1).all() or len(calls) == 1
@pytest.mark.parametrize("Search", [HyperbandSearchCV, IncrementalSearchCV])
@pytest.mark.parametrize("verbose", [True, False])
def test_verbosity(Search, verbose, capsys):
max_iter = 15
@gen_cluster(client=True)
async def _test_verbosity(c, s, a, b):
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
search = Search(model, params, max_iter=max_iter, verbose=verbose)
await search.fit(X, y)
assert search.best_score_ > 0 # ensure search ran
return search
# IncrementalSearchCV always logs to INFO
logger = logging.getLogger("dask_ml.model_selection")
with captured_logger(logger) as logs:
_test_verbosity()
messages = logs.getvalue().splitlines()
# Make sure we always log
assert messages
assert any("score" in m for m in messages)
# If verbose=True, make sure logs to stdout
_test_verbosity()
std = capsys.readouterr()
stdout = [line for line in std.out.split("\n") if line]
if verbose:
assert len(stdout) >= 1
assert all(["CV" in line for line in stdout])
else:
assert not len(stdout)
if "Hyperband" in str(Search):
assert all("[CV, bracket=" in m for m in messages)
else:
assert all("[CV]" in m for m in messages)
brackets = 3 if "Hyperband" in str(Search) else 1
assert sum("examples in each chunk" in m for m in messages) == brackets
assert sum("creating" in m and "models" in m for m in messages) == brackets
@gen_cluster(client=True)
async def test_verbosity_types(c, s, a, b):
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
for verbose in [-1.0, 1.2]:
search = IncrementalSearchCV(model, params, verbose=verbose, max_iter=3)
with pytest.raises(ValueError, match="0 <= verbose <= 1"):
await search.fit(X, y)
for verbose in [0.0, 0, 1, 1.0, True, False]:
search = IncrementalSearchCV(model, params, verbose=verbose, max_iter=3)
await search.fit(X, y)
@pytest.mark.parametrize("verbose", [0, 0.0, 1 / 2, 1, 1.0, False, True])
def test_verbosity_levels(capsys, verbose):
max_iter = 14
@gen_cluster(client=True)
async def _test_verbosity(c, s, a, b):
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
search = IncrementalSearchCV(model, params, max_iter=max_iter, verbose=verbose)
await search.fit(X, y)
return search
with captured_logger(logging.getLogger("dask_ml.model_selection")) as logs:
search = _test_verbosity()
assert search.best_score_ > 0 # ensure search ran
messages = logs.getvalue().splitlines()
factor = 1 if isinstance(verbose, bool) else verbose
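    # The assertion below assumes roughly one log record per partial_fit call when
    # verbose is truthy (and a corresponding fraction of the calls for fractional
    # verbose), plus a couple of summary lines; hence the "+ 2" and the abs=1 slack.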
assert len(messages) == pytest.approx(max_iter * factor + 2, abs=1)
@gen_cluster(client=True)
async def test_search_patience_infeasible_tol(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
rng = check_random_state(42)
params = {"value": rng.rand(1000)}
model = ConstantFunction()
max_iter = 10
score_increase = -10
search = IncrementalSearchCV(
model, params, max_iter=max_iter, patience=3, tol=score_increase,
)
await search.fit(X, y, classes=[0, 1])
hist = pd.DataFrame(search.history_)
assert hist.partial_fit_calls.max() == max_iter
@gen_cluster(client=True)
async def test_search_basic_patience(c, s, a, b):
X, y = make_classification(n_samples=100, n_features=5, chunks=(10, 5))
rng = check_random_state(42)
params = {"slope": 2 + rng.rand(1000)}
model = LinearFunction()
    # Test the case where tol is too small (all models finish)
max_iter = 15
patience = 5
increase_after_patience = patience
search = IncrementalSearchCV(
model,
params,
max_iter=max_iter,
tol=increase_after_patience,
patience=patience,
fits_per_score=3,
)
await search.fit(X, y, classes=[0, 1])
hist = | pd.DataFrame(search.history_) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[79]:
import numpy as np
import pandas as pd
import datetime
import csv
# In[45]:
#modifying the text file to make the split of date and temperature pairs.
def textModify():
with open('temperature.txt', 'r') as file :
filedata = file.read()
filedata = filedata.replace('), ', ' $ ')
with open('temperature.txt', 'w') as file:
file.write(filedata)
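# Illustrative example (the exact raw format is an assumption): a line such as
#   "(200001, 23), (200002, 25)"
# becomes
#   "(200001, 23 $ (200002, 25)"
# so that pd.read_csv(..., sep="$") yields two pair-columns per row, which
# dataCleaning() then stacks and splits on ', '.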
# In[59]:
def dataCleaning(temp):
    # processing 2000 rows in a single dataframe and splitting the date and temperature values into two columns
    temp1 = temp.iloc[:, 1].rename(0)  # rename so both pair-columns stack into column 0
    temp.drop(1, axis=1, inplace=True)
    temperature_main = pd.concat([temp, temp1], ignore_index=True)
temperature_main = temperature_main[0].str.split(', ', expand =True)
# converting the date to year and temperature to integer
for i in range(len(temperature_main[0])):
        #year
        value1 = temperature_main.loc[i, 0]
        value1 = value1.replace(" ", "").lstrip('(')
        temperature_main.loc[i, 0] = datetime.datetime.strptime(value1, "%Y%m").year
        #temperature
        value2 = temperature_main.loc[i, 1]
        value2 = value2.replace(" ", "").rstrip(')')
        temperature_main.loc[i, 1] = int(value2)
#rename column headers
temperature_main.rename(columns = {0: "year" , 1: "temperature"}, inplace = True)
return temperature_main
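# For example (hypothetical values), the cleaned frame looks like:
#    year  temperature
#    2000           23
#    2000           25
# with 2000 rows in total (1000 input rows x 2 pairs per row).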
# In[47]:
def dataSplit(main_dataSet):
#split the data into two data sets to send to mapper
#first 1000 rows to mapper1
dataSet1 = main_dataSet.iloc[0:1000,:]
#second 1000 rows to mapper2
dataSet2 = main_dataSet.iloc[1000:2000,:]
return dataSet1,dataSet2
# In[48]:
def mapper(dataSet):
#converting the dataframe to list
mapperOutput = list(dataSet.to_records(index = False))
return mapperOutput
# In[49]:
def sortFunction(mapping):
    #sorting the output of the mapping function in ascending order
mapping.sort(key=lambda x:x[0])
#shuffling all the data based on the keys
shuffle ={}
for key, value in mapping:
shuffle.setdefault(key,[]).append(value)
return shuffle
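# For example, with (hypothetical) mapper output [(2015, 28), (2016, 31), (2016, 27)]
# the shuffle step groups values by year: {2015: [28], 2016: [31, 27]}.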
# In[50]:
def partitionFunction(dataSet):
    #splitting the data to send to the reducers
reducer1 = {}
reducer2 = {}
for key, value in dataSet.items():
if key <= 2015:
reducer1[key] = value
else:
reducer2[key] = value
return reducer1, reducer2
# In[97]:
def reducerFunction(dataSet):
    #acquiring the max value for each year
for key, values in dataSet.items():
dataSet[key] = max(values)
return dataSet
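# For example, a (hypothetical) shuffled input {2015: [28, 30], 2016: [31, 27]}
# reduces to the per-year maxima {2015: 30, 2016: 31}.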
# In[105]:
def toCsvFile(output):
#defining header for csv file
headers= ['year', 'Max Temp']
with open('max_temp.csv', 'w') as f:
writer = csv.DictWriter(f, fieldnames = headers)
writer.writeheader()
        #writing data into the csv file
for key in output.keys():
f.write("%s, %s\n" % (key, output[key]))
# In[106]:
def mainFunction():
textModify()
temp= | pd.read_csv('temperature.txt',sep="$", header=None) | pandas.read_csv |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
        index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
        index = IntervalIndex.from_breaks(breaks, closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
    def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
    def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
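    # The next test exercises IntervalIndex.contains(), which (unlike `in` above)
    # reports whether a scalar falls inside any interval of the index; the Interval
    # arguments checked below show it also accepts overlapping intervals.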
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
        # these overlap the index completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
# -*- coding: utf-8 -*-
"""
Created on 12/20/2019
@author: Azhu
"""
import glob
import pdfquery
import os
import tabula as tb
import cv2
import numpy as np
import pandas as pd
import PyPDF2
from PyPDF2 import PdfFileReader, PdfFileWriter
########################################################################################
# define a scraping function
def pdfscrape(pdf):
# extract year
label = pdf.pq('LTTextLineHorizontal:contains("{}")'.format("Tax Statement") )
year = pd.DataFrame()
for i in range(len(label)):
x0 = float(label[i].get('x1',0)) + 5
y0 = float(label[i].get('y1',0)) + 1
x1 = float(label[i].get('x1',0)) + 30
y1 = float(label[i].get('y1',0)) + 2
if (x0<300) & (x1<300) & (y0>440) & (y1>440):
loc = 1
elif (x0>300) & (x1>300) & (y0>440) & (y1>440):
loc = 2
elif (x0<300) & (x1<300) & (y0<440) & (y1<440):
loc = 3
elif (x0>300) & (x1>300) & (y0<440) & (y1<440):
loc = 4
year_str = pdf.pq('LTTextLineHorizontal:overlaps_bbox("%s, %s, %s, %s")' % (x0, y0, x1, y1)).text()
record = pd.DataFrame({'loc': loc, 'year': year_str}, index=[0])
year = year.append(record, ignore_index = True)
# extract ssn
label = pdf.pq('LTTextLineHorizontal:contains("a Employee")')
ssn = pd.DataFrame()
for i in range(len(label)):
x0 = float(label[i].get('x0',0))
y0 = float(label[i].get('y0',0)) - 15
x1 = float(label[i].get('x1',0))
y1 = float(label[i].get('y0',0)) - 1
if str(label[i].layout).find('SSN')>0:
if (x0<300) & (x1<300) & (y0>440) & (y1>440):
loc = 1
elif (x0>300) & (x1>300) & (y0>440) & (y1>440):
loc = 2
elif (x0<300) & (x1<300) & (y0<440) & (y1<440):
loc = 3
elif (x0>300) & (x1>300) & (y0<440) & (y1<440):
loc = 4
ssn_str = pdf.pq('LTTextLineHorizontal:overlaps_bbox("%s, %s, %s, %s")' % (x0, y0, x1, y1)).text()
record = pd.DataFrame({'loc': loc, 'ssn': ssn_str}, index=[0])
ssn = ssn.append(record, ignore_index = True)
else:
continue
# extract employer
label = pdf.pq('LTTextLineHorizontal:contains("{}")'.format("d Control") )
employer = | pd.DataFrame() | pandas.DataFrame |
import scipy.io as sio
import numpy as np
import pandas as pd
import tables
import pickle
from scipy.interpolate import interp1d
import os
from ismore import settings
from utils.constants import *
pkl_name = os.path.expandvars('$BMI3D/riglib/ismore/traj_reference_interp.pkl')
mat_name = os.path.expandvars('$HOME/Desktop/Kinematic data ArmAssist/Epoched data/epoched_kin_data/NI_sess05_20140610/NI_B1S005R01.mat')
columns = [
'Time',
'AbsPos_X(mm)',
'AbsPos_Y(mm)',
'AbsPos_Angle(dg)',
'Kalman_X(mm)',
'Kalman_Y(mm)',
'Kalman_Angle(dg)',
'Arm_Force(gr)',
'Arm_Angle(dg)',
'Supination(dg)',
'Thumb(dg)',
'Index(dg)',
'Fingers(dg)',
]
field_mapping = {
'Time': 'ts',
'AbsPos_X(mm)': 'aa_px',
'AbsPos_Y(mm)': 'aa_py',
'AbsPos_Angle(dg)': 'aa_ppsi',
'Supination(dg)': 'rh_pprono',
'Thumb(dg)': 'rh_pthumb',
'Index(dg)': 'rh_pindex',
'Fingers(dg)': 'rh_pfing3',
}
aa_xy_states = ['aa_px', 'aa_py']
aa_pos_states = ['aa_px', 'aa_py', 'aa_ppsi']
rh_pos_states = ['rh_pthumb', 'rh_pindex', 'rh_pfing3', 'rh_pprono']
rh_vel_states = ['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']
ang_pos_states = ['aa_ppsi', 'rh_pthumb', 'rh_pindex', 'rh_pfing3', 'rh_pprono']
pos_states = aa_pos_states + rh_pos_states
# ArmAssist and ReHand trajectories are saved as separate pandas DataFrames with the following fields
aa_fields = ['ts'] + aa_pos_states
rh_fields = ['ts'] + rh_pos_states + rh_vel_states
def preprocess_data(df):
# rename dataframe fields to match state space names used in Python code
df = df.rename(columns=field_mapping)
# convert units to sec, cm, rad
df['ts'] *= ms_to_s
df[aa_xy_states] *= mm_to_cm
df[ang_pos_states] *= deg_to_rad
# translate ArmAssist and ReHand trajectories to start at a particular position
starting_pos = settings.starting_pos
pos_offset = df.ix[0, pos_states] - starting_pos
df[pos_states] -= pos_offset
# differentiate ReHand positions to get ReHand velocity data
delta_pos = np.diff(df[rh_pos_states], axis=0)
delta_ts = np.diff(df['ts']).reshape(-1, 1)
vel = delta_pos / delta_ts
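    # np.diff yields one fewer sample than the positions, so prepend a zero row to
    # keep the velocity array aligned with the position samples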
vel = np.vstack([np.zeros((1, 4)), vel])
df_rh_vel = pd.DataFrame(vel, columns=rh_vel_states)
df = pd.concat([df, df_rh_vel], axis=1)
return df
# load kinematic data
mat = sio.loadmat(mat_name, struct_as_record=False, squeeze_me=True)
kin_epoched = mat['kin_epoched']
trial_types = ['Blue', 'Brown', 'Green', 'Red']
# create a dictionary of trajectories, indexed by trial_type
traj = dict()
for i, kin in enumerate(kin_epoched):
df = | pd.DataFrame(kin, columns=columns) | pandas.DataFrame |
import pytest
from datetime import datetime
import pandas as pd
from tadpole_algorithms.transformations import convert_to_year_month, \
convert_to_year_month_day, map_string_diagnosis
def test_forecastDf_date_conversion():
forecastDf = pd.DataFrame([{'Forecast Date': '2019-07'}])
assert pd.api.types.is_string_dtype(forecastDf.dtypes)
# original conversion code
forecastDf['Forecast Date'] = [datetime.strptime(x, '%Y-%m') for x in forecastDf['Forecast Date']] # considers every month estimate to be the actual first day 2017-01
print(forecastDf.dtypes)
assert pd.api.types.is_datetime64_ns_dtype(forecastDf['Forecast Date'])
# new conversion code
# from string
forecastDf_new1 = pd.DataFrame([{'Forecast Date': '2019-07'}])
forecastDf_new1['Forecast Date'] = convert_to_year_month(forecastDf_new1['Forecast Date'])
assert pd.api.types.is_datetime64_ns_dtype(forecastDf_new1['Forecast Date'])
# from date object
forecastDf_new2 = pd.DataFrame([{'Forecast Date': datetime(2019, 7, 1, 0, 0, 0, 0)}])
forecastDf_new2['Forecast Date'] = convert_to_year_month(forecastDf_new2['Forecast Date'])
assert pd.api.types.is_datetime64_ns_dtype(forecastDf_new2['Forecast Date'])
assert forecastDf['Forecast Date'].equals(forecastDf_new1['Forecast Date'])
assert forecastDf_new1['Forecast Date'].equals(forecastDf_new2['Forecast Date'])
def test_d4Df_date_conversions():
d4Df = pd.DataFrame([{'ScanDate': '2019-07-10'}])
assert pd.api.types.is_string_dtype(d4Df.dtypes)
# original code:
d4Df['ScanDate'] = [datetime.strptime(x, '%Y-%m-%d') for x in d4Df['ScanDate']]
assert pd.api.types.is_datetime64_ns_dtype(d4Df['ScanDate'])
# new conversion code
# from string
d4Df_new1 = pd.DataFrame([{'ScanDate': '2019-07-10'}])
d4Df_new1['ScanDate'] = convert_to_year_month_day(d4Df_new1['ScanDate'])
assert pd.api.types.is_datetime64_ns_dtype(d4Df_new1['ScanDate'])
# from date object
d4Df_new2 = pd.DataFrame([{'ScanDate': datetime(2019, 7, 10, 0, 0, 0, 0)}])
d4Df_new2['ScanDate'] = convert_to_year_month_day(d4Df_new2['ScanDate'])
assert | pd.api.types.is_datetime64_ns_dtype(d4Df_new2['ScanDate']) | pandas.api.types.is_datetime64_ns_dtype |
import pandas as pd
ds = | pd.Series([2, 4, 6, 8, 10, 12, 14, 16, 18, 20]) | pandas.Series |
"""
Copyright (C) 2018 <NAME> (<EMAIL>)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ast
import json
from collections import defaultdict
from collections.abc import Iterable
from copy import deepcopy
from itertools import product, combinations
import numpy as np
import pandas as pd
from scipy.sparse import csc_matrix
from scipy.sparse import csgraph as csg
from .motif import Motif
from .prebuilt import PREBUILT
# 1. _event_pair_processed contains event pairs even if they do not create an edge first time round.
#    Need to check that event pairs are being processed for objects even if they didn't make an edge
#    when we considered node connectivity.
# 2. Currently we could easily remove the numpy and pandas dependencies (leaving only scipy and
#    the standard library).
class NotImplementedError(BaseException):
"""Returns when a function is not implemented."""
pass
class BadInputError(BaseException):
"""Returns when data input is not in the correct format."""
pass
class NoObjectError(BaseException):
"""Returns when events do not have any objects."""
pass
class EventGraph(object):
"""
General event graph class for building event graphs from sequences of events.
Event graphs can be constructed for arbitrary event sequences, containing directed
and undirected hyperevents, and containing arbitrary extra data.
A number of event joining rules are implemented, and custom rules can used.
The event graph is described by two tables, the table of events (EventGraph.events),
and the table of event edges (EventGraph.eg_edges). There also exists an extra table
(EventGraph.events_meta) which gives further information on events such as component
or cluster membership.
Event graphs should be created using one of the following class methods:
EventGraph.from_pandas_eventlist() (default)
EventGraph.from_dict_eventlist()
EventGraph.from_file()
Example:
events = [{'source': 0, 'target':1, 'time':1, 'type':'A'},
{'source': 1, 'target':2, 'time':3, 'type':'B'}]
eg = EventGraph.from_dict_eventlist(events, graph_rules='teg')
References:
        [1] <NAME>, The Temporal Event Graph, Journal of Complex Networks (2017)
        [2] <NAME>, In preparation (2018)
        [3] <NAME>, In preparation (2018)
"""
# TO DO, along with other magic methods where needed.
def __repr__(self):
status = 'built' if hasattr(self, 'eg_edges') else 'unbuilt'
edges = len(self.eg_edges) if hasattr(self, 'eg_edges') else 0
return "<EventGraph with {} nodes, {} events, and {} edges (status: {})>".format(self.N,
self.M,
edges,
status)
def __len__(self):
return self.M
@classmethod
def from_pandas_eventlist(cls, events, graph_rules, **kwargs):
"""
Loads an event list in the form of a Pandas DataFrame into an unbuilt EventGraph.
Input:
events (pd.DataFrame): Table of temporal events
graph_rules (str or dict): Rule set to build the event graph. Currently implemented
are ['teg', 'eg', 'pfg']. See prebuilt.py for custom schema.
Returns:
EventGraph
"""
return cls(events=events, graph_rules=graph_rules, **kwargs)
@classmethod
def from_dict_eventlist(cls, events, graph_rules, **kwargs):
"""
Loads an event list in the form of a list of records into an unbuilt EventGraph.
Input:
events (list): List of events of the minimal form {'source': X, 'target': X, 'time': X}.
graph_rules (str or dict): Rule set to build the event graph. Currently implemented
are ['teg', 'eg', 'pfg']. See prebuilt.py for custom schema.
Returns:
EventGraph
"""
return cls(events=pd.DataFrame(events), graph_rules=graph_rules, **kwargs)
@classmethod
def from_file(cls, filepath):
"""
Load a built event graph from file (either stored as .json or .pkl)
Input:
filepath: Filepath to saved event graph
Returns:
EventGraph
"""
if filepath.endswith('.json'):
with open(filepath, 'r', encoding='utf-8') as file:
payload = json.load(file)
for item in ['events', 'events_meta', 'eg_edges']:
payload[item] = pd.DataFrame.from_dict(payload[item])
else:
raise Exception("Currently only import from .json supported.")
return cls(**payload)
def __init__(self, *args, **kwargs):
# This massively needs tidying!
# SANITISATION
# 1. ENSURE EVENTS ARE IN TIME ORDER
# 2.
self.events = kwargs['events']
if not isinstance(self.events, pd.DataFrame):
raise BadInputError(
"Events must be a DataFrame ({} passed), or passed through classmethods.".format(type(self.events)))
self.directed = kwargs.get('directed', True)
if 'target' not in self.events.columns:
self.events['target'] = np.empty((len(self.events), 0)).tolist()
            self.directed = False  # Efficiency savings to be had if we treat this case separately.
if 'events_meta' in kwargs.keys():
self.events_meta = kwargs['events_meta']
else:
self.events_meta = pd.DataFrame(index=self.events.index)
self.ne_incidence = None
self.oe_incidence = None
self.ne_matrix = None
self.oe_matrix = None
self.eg_matrix = None
if 'rules' in kwargs.keys():
self.event_graph_rules = kwargs['graph_rules']
else:
if isinstance(kwargs['graph_rules'], dict):
# Possibly require further checks for custom rules
self.event_graph_rules = kwargs['graph_rules']
elif kwargs['graph_rules'].lower() in ['teg', 'temporal event graph', 'temporal_event_graph']:
self.event_graph_rules = PREBUILT['temporal_event_graph']
elif kwargs['graph_rules'].lower() in ['eg', 'event graph', 'event_graph']:
self.event_graph_rules = PREBUILT['general_event_graph']
elif kwargs['graph_rules'].lower() in ['pfg', 'path finder graph', 'path_finder_graph']:
self.event_graph_rules = PREBUILT['path_finder_graph']
else:
raise Exception("Incompatible Rules")
if 'eg_edges' in kwargs.keys():
self.eg_edges = kwargs['eg_edges']
# This will now give the index of the event pair (edge) in the event graph
built = kwargs.get('built', False)
if built:
self._event_pair_processed = {row.source: {row.target: ix} for ix, row in self.eg_edges.iterrows()}
else:
self._event_pair_processed = kwargs.get('_event_pair_processed',
defaultdict(lambda: defaultdict(bool)))
        self.generate_node_event_incidence()
        # Indexes edges of the eventgraph as we create them; must be set before build() is called.
        self._edge_indexer = 0
        build_on_creation = kwargs.get('build_on_creation', False)
        if build_on_creation:
            self.build()
@property
def M(self):
""" Number of events in the event graph."""
return len(self.events)
@property
def N(self):
""" Number of nodes in the event graph."""
return len(self.ne_incidence)
@property
def D(self):
""" Duration of the event graph (requires ordered event table). """
if 'duration' in self.events.columns:
return self.events.iloc[-1].time + self.events.iloc[-1].duration - self.events.iloc[0].time
else:
return self.events.iloc[-1].time - self.events.iloc[0].time
def generate_node_event_incidence(self):
"""
Creates a node-event incidence dictionary used to build the event graph.
Input:
None
Returns:
None
"""
self.ne_incidence = defaultdict(list)
for ix, event in self.events.iterrows():
for group in ['source', 'target']:
if isinstance(event[group], Iterable) and not isinstance(event[group], str):
for node in event[group]:
self.ne_incidence[node].append(ix)
else:
self.ne_incidence[event[group]].append(ix)
def _generate_object_event_incidence(self):
"""
Creates an object-event incidence dictionary used to build the event graph.
Input:
None
Returns:
None
"""
self.oe_incidence = defaultdict(list)
for ix, event in self.events.iterrows():
if isinstance(event.objects, Iterable) and not isinstance(event.objects, str):
for obj in event.objects:
self.oe_incidence[obj].append(ix)
else:
self.oe_incidence[event.objects].append(ix)
def generate_node_event_matrix(self):
"""
Creates a node-event matrix using the node-event incidence dictionary.
The matrix A_{ij} = 1 if node i is a participant in event j.
The matrix is of size (N,M).
Input:
None
Returns:
event_matrix (scipy.sparse.csc_matrix):
"""
if self.ne_incidence is None:
self.generate_node_event_incidence()
if not hasattr(self, 'event_map'):
self.event_map = self.events.reset_index(drop=False)['index']
if not hasattr(self, 'node_map'):
self.node_map = pd.Series(sorted([x for x in self.ne_incidence.keys()]))
inv_event_map = pd.Series(self.event_map.index, index=self.event_map)
inv_node_map = pd.Series(self.node_map.index, index=self.node_map)
rows = []
cols = []
for node, events in self.ne_incidence.items():
for event in events:
rows.append(inv_node_map[node])
cols.append(inv_event_map[event])
data = np.ones_like(rows)
self.ne_matrix = csc_matrix((data, (rows, cols)), dtype=bool)
return self.ne_matrix
def generate_eg_matrix(self, binary=False):
"""
Generate an (MxM) matrix of the event graph, weighted by inter-event times.
Input:
None
Returns:
event_matrix (scipy.sparse.csc_matrix):
"""
if not hasattr(self, 'event_map'):
self.event_map = self.events.reset_index(drop=False)['index']
inv_event_map = pd.Series(self.event_map.index, index=self.event_map)
# Make a sparse EG matrix
rows = []
cols = []
data = []
for ix, edge in self.eg_edges.iterrows():
rows.append(inv_event_map[edge.source])
cols.append(inv_event_map[edge.target])
data.append(edge.delta)
if binary:
data = [1 for d in data]
self.eg_matrix = csc_matrix((data, (rows, cols)),
shape=(self.M, self.M),
dtype=int)
return self.eg_matrix
def build(self, verbose=False):
"""
Builds the event graph from event sequence.
Input:
verbose (bool): If True, prints out progress of build [default=False]
Returns:
None
"""
eg_edges = {}
for count, events in enumerate(self.ne_incidence.values()):
if verbose and count % 50 == 0: print(count, '/', self.N, end='\r', flush=True)
for ix1, event_one in enumerate(events):
for ix2, event_two in enumerate(events[ix1 + 1:]):
if self._event_pair_processed[event_one][event_two]:
pass
else:
e1 = self.events.loc[event_one]
e2 = self.events.loc[event_two]
connected, dt = self.event_graph_rules['event_processor'](e1, e2)
self._event_pair_processed[event_one][event_two] = self._edge_indexer
# If we want to enforce a dt
if dt > self.event_graph_rules['delta_cutoff']:
break
if connected:
eg_edges[self._edge_indexer] = (event_one, event_two, dt)
self._edge_indexer += 1
# if subsequent event only then break
# Can extend our rules so that we can do 'next X events only'.
if self.event_graph_rules['subsequential']:
if ix2 + 1 == self.event_graph_rules['subsequential']:
break
if hasattr(self, 'eg_edges'):
new_edges = pd.DataFrame.from_dict(eg_edges, orient='index')
new_edges.columns = ['source', 'target', 'delta']
self.eg_edges = pd.concat([self.eg_edges, new_edges], join='inner')
else:
self.eg_edges = pd.DataFrame.from_dict(eg_edges, orient='index')
self.eg_edges.columns = ['source', 'target', 'delta']
def _build_from_objects(self, verbose=False):
"""
Builds the event graph using object relations (instead of, of in addition to
the node relations)
Input:
verbose (bool): [default=False]
Returns:
None
"""
if 'objects' not in self.events.columns:
raise NoObjectError("Event data must contain 'objects'.")
self._generate_object_event_incidence()
eg_edges = {}
for count, events in enumerate(self.oe_incidence.values()):
if verbose and count % 50 == 0: print(count, '/', self.N, end='\r', flush=True)
for ix, event_one in enumerate(events):
                for ix2, event_two in enumerate(events[ix + 1:]):
                    if self._event_pair_processed[event_one][event_two]:
                        pass
                    else:
                        e1 = self.events.loc[event_one]
                        e2 = self.events.loc[event_two]
                        connected, dt = self.event_graph_rules['event_object_processor'](e1, e2)
                        self._event_pair_processed[event_one][event_two] = self._edge_indexer
                        # If we want to enforce a dt
                        if dt > self.event_graph_rules['delta_cutoff']:
                            break
                        if connected:
                            eg_edges[self._edge_indexer] = (event_one, event_two, dt)
                            self._edge_indexer += 1
                    # if subsequent event only then break
                    # Can extend our rules so that we can do 'next X events only'.
                    if self.event_graph_rules['subsequential']:
                        if ix2 + 1 == self.event_graph_rules['subsequential']:
                            break
if hasattr(self, 'eg_edges'):
new_edges = pd.DataFrame.from_dict(eg_edges, orient='index')
new_edges.columns = ['source', 'target', 'delta']
self.eg_edges = pd.concat([self.eg_edges, new_edges], join='inner')
else:
self.eg_edges = pd.DataFrame.from_dict(eg_edges, orient='index')
self.eg_edges.columns = ['source', 'target', 'delta']
def randomize_event_times(self, seed=None):
"""
Shuffles the times for all events.
Can only be called before the event graph is built.
Input:
seed (int): The seed for the random shuffle [default=None].
Returns:
None
"""
if hasattr(self, 'eg_edges'):
raise Exception("Event Graph has already been built. To randomize data create a new EventGraph object.")
self.events.time = self.events.time.sample(frac=1, random_state=seed).values
self.events = self.events.sort_values(by='time').reset_index(drop=True)
self.generate_node_event_incidence()
def calculate_edge_motifs(self, edge_type=None, condensed=False):
"""
Calculates the two-event motif for all edges of the event graph.
Currently events with duration are unsupported.
Input:
edge_type (str): Column name for which edge types are to be differentiated.
condensed (bool): If True, condenses the motif to be agnostic to the number
of nodes in each event [default=False].
Returns:
None
"""
if edge_type is None or edge_type not in self.events.columns:
columns = ['source', 'target', 'time']
else:
columns = ['source', 'target', 'time', edge_type]
def find_motif(x, columns, condensed=False, directed=False):
""" Create a motif from the joined event table. """
e1 = tuple(x['{}_s'.format(c)] for c in columns)
e2 = tuple(x['{}_t'.format(c)] for c in columns)
return str(Motif(e1, e2, condensed, directed))
temp = pd.merge(pd.merge(self.eg_edges[['source', 'target']],
self.events,
left_on='source',
right_index=True,
suffixes=('_d', ''))[['target_d'] + columns],
self.events,
left_on='target_d',
right_index=True,
suffixes=('_s', '_t'))[
["{}_{}".format(field, code) for field, code in product(columns, ('s', 't'))]]
self.eg_edges['motif'] = temp.apply(
lambda x: find_motif(x, columns, condensed=condensed, directed=self.directed), axis=1)
def create_networkx_aggregate_graph(self, edge_colormap=None):
"""
Creates an aggregate static network of node interactions within the event graph.
Input:
edge_colormap (dict): Mapping from edge type to a color.
Returns:
            G (nx.Graph/nx.DiGraph): Aggregate graph of the event graph.
                Directed or undirected depending on the event graph type.
"""
try:
import networkx as nx
except ImportError:
raise ImportError("Networkx package required to create graphs.")
if edge_colormap is None:
edge_colormap = defaultdict(lambda: 'black')
if self.directed:
G = nx.DiGraph()
for _, event in self.events.iterrows():
typed = ('type' in self.events.columns)
attrs = {'type': event.type, 'color': edge_colormap[event.type]} if typed else {'color':'black'}
if isinstance(event.target, Iterable) and (len(event.target) == 0):
G.add_node(event.source)
elif isinstance(event.target, str) or isinstance(event.target, np.int):
G.add_edge(event.source, event.target, **attrs)
else:
for target in event.target:
G.add_edge(event.source, target, **attrs)
else:
G = nx.Graph()
for _, event in self.events.iterrows():
typed = ('type' in self.events.columns)
attrs = {'type': event.type, 'color': edge_colormap[event.type]} if typed else {'color':'black'}
G.add_edges_from(combinations(event.source, 2), **attrs)
return G
def create_networkx_event_graph(self, event_colormap=None, include_graph_data=False):
"""
Creates a networkx graph representation of the event graph.
Input:
            event_colormap (dict): Mapping from event type to a color.
        Returns:
            G (nx.DiGraph): Directed graph representation of the event graph,
                with one node per event.
"""
try:
import networkx as nx
except ImportError:
raise ImportError("Networkx package required to create graphs.")
G = nx.DiGraph()
if event_colormap is None:
event_colormap = defaultdict(lambda: 'grey')
typed = ('type' in self.events.columns)
for ix, event in self.events.iterrows():
attrs = {'type': event.type, 'fillcolor': event_colormap[event.type]} if typed else {'fillcolor':'grey'}
G.add_node(ix, **attrs)
if include_graph_data:
for _, edge in self.eg_edges.iterrows():
G.add_edge(edge.source, edge.target, **{'delta': edge.delta, 'motif': edge.motif})
else:
for _, edge in self.eg_edges.iterrows():
G.add_edge(edge.source, edge.target)
return G
def connected_components_indices(self):
"""
Calculates the component that each event belongs to and saves it to self.events_meta.
        Note that component numerical assignment is arbitrary and can differ between runs.
Input:
None
Returns:
components (list): A list containing the component allocation for each event.
"""
self.generate_eg_matrix()
components = csg.connected_components(self.eg_matrix,
directed=True,
connection='weak',
return_labels=True)[1]
self.events_meta.loc[:, 'component'] = components
return components
def get_component(self, ix):
"""
Returns a component of the event graph as an EventGraph object.
Input:
ix (int): Index of component.
Returns:
eventgraph: EventGraph of the component.
"""
if not hasattr(self.events_meta, 'component'):
self.connected_components_indices()
event_ids = self.events_meta.component == ix
events = self.events[event_ids]
events_meta = self.events_meta[event_ids]
edge_ids = | pd.merge(self.eg_edges, events_meta, left_on='source', right_index=True) | pandas.merge |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/27 15:14
Desc: Eastmoney - economic data - United Kingdom
http://data.eastmoney.com/cjsj/foreign_4_0.html
"""
import pandas as pd
import requests
import demjson
# Halifax house price index, MoM
def macro_uk_halifax_monthly():
"""
    Eastmoney - economic data - UK - Halifax house price index, MoM
    http://data.eastmoney.com/cjsj/foreign_4_0.html
    :return: Halifax house price index, MoM
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "0",
"pageNo": "1",
"pageNum": "1",
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
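# Minimal usage sketch; the returned column names are kept in Chinese exactly as
# published by the data source (时间 = period, 前值 = previous value,
# 现值 = current value, 发布日期 = release date):
#   df = macro_uk_halifax_monthly()
#   print(df.head())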
# Halifax house price index, YoY
def macro_uk_halifax_yearly():
"""
    Eastmoney - economic data - UK - Halifax house price index, YoY
    http://data.eastmoney.com/cjsj/foreign_4_1.html
    :return: Halifax house price index, YoY
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "1",
"pageNo": "1",
"pageNum": "1",
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# trade balance
def macro_uk_trade():
"""
    Eastmoney - economic data - UK - trade balance
    http://data.eastmoney.com/cjsj/foreign_4_2.html
    :return: trade balance
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "2",
"pageNo": "1",
"pageNum": "1",
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Central bank (Bank of England) interest rate decision
def macro_uk_bank_rate():
    """
    Eastmoney economic data - UK - central bank interest rate decision
    http://data.eastmoney.com/cjsj/foreign_4_3.html
    :return: central bank interest rate decision
    :rtype: pandas.DataFrame
    """
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "3",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Core consumer price index, year-over-year
def macro_uk_core_cpi_yearly():
    """
    Eastmoney economic data - UK - core consumer price index, year-over-year
    http://data.eastmoney.com/cjsj/foreign_4_4.html
    :return: core CPI, year-over-year
    :rtype: pandas.DataFrame
    """
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "4",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Core consumer price index, month-over-month
def macro_uk_core_cpi_monthly():
    """
    Eastmoney economic data - UK - core consumer price index, month-over-month
    http://data.eastmoney.com/cjsj/foreign_4_5.html
    :return: core CPI, month-over-month
    :rtype: pandas.DataFrame
    """
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "5",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Consumer price index, year-over-year
def macro_uk_cpi_yearly():
    """
    Eastmoney economic data - UK - consumer price index, year-over-year
    http://data.eastmoney.com/cjsj/foreign_4_6.html
    :return: CPI, year-over-year
    :rtype: pandas.DataFrame
    """
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "6",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Consumer price index, month-over-month
def macro_uk_cpi_monthly():
    """
    Eastmoney economic data - UK - consumer price index, month-over-month
    http://data.eastmoney.com/cjsj/foreign_4_7.html
    :return: CPI, month-over-month
    :rtype: pandas.DataFrame
    """
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "7",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Retail sales, month-over-month
def macro_uk_retail_monthly():
    """
    Eastmoney economic data - UK - retail sales, month-over-month
    http://data.eastmoney.com/cjsj/foreign_4_8.html
    :return: retail sales, month-over-month
    :rtype: pandas.DataFrame
    """
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "8",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Retail sales, year-over-year
def macro_uk_retail_yearly():
    """
    Eastmoney economic data - UK - retail sales, year-over-year
    http://data.eastmoney.com/cjsj/foreign_4_9.html
    :return: retail sales, year-over-year
    :rtype: pandas.DataFrame
    """
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "4",
"stat": "9",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.t | o_numeric(temp_df["现值"]) | pandas.to_numeric |
import os
import sys
import logging
import argparse
import shutil
import numpy as np
from pandas import read_csv, DataFrame
from Bio.SeqIO import parse
from itertools import product
from subprocess import call
from scipy.spatial.distance import pdist, squareform
import plotly.express as px
#from pandas.core.dtypes.missing import na_value_for_dtype
def main():
parser = argparse.ArgumentParser()
group1 = parser.add_argument_group("Base arguments")
group2 = parser.add_argument_group("Algorithm Parameter")
    group1.add_argument('-ITS1',  # Should be concatenated file
type=str,
default=None,
help='Sequence of ITS1 in fasta format.')
group1.add_argument('-ITS2',
type=str,
default=None,
help='Sequence of ITS2 in fasta format.')
group1.add_argument('-CONCAT',
type=str,
default=None,
help='Sequence of ITS1, 5.8rRNA, ITS2 in fasta format.')
group2.add_argument('-K',
type=str,
default=10,
help='K nearest neighbors.')
group2.add_argument('-e',
type=str,
default=0.5,
                        help='Epsilon distance for the nearest neighbours. ε ∈ [0, 1]')
group1.add_argument('-out',
type=str,
default='./FunFun_output',
help='Get output of your file.')
if len(sys.argv)==1:
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
# Get arguments
ITS1 = args.ITS1
ITS2 = args.ITS2
CONCAT = args.CONCAT
K = args.K
e = args.e
out = args.out
functionality = read_csv('./data/functionality.tsv', sep='\t', index_col=[0])
# Making directory
a_logger = logging.getLogger()
a_logger.setLevel(logging.DEBUG)
def normalize(kmers):
"""
"""
norm = sum(list(kmers.values()))
for kmer in kmers.keys():
kmers[kmer] = kmers[kmer]/ norm
return kmers
def get_kmers_fereq(seq, k=2):
""""
"""
kmers = {"".join(kmer) : 0 for kmer in list(product("AGTC", repeat=k))}
step = 1
start = 0
end = k
cken = []
while end != len(seq) - 1:
kmers[str(seq[start: end])] += 1
start, end = start + step, end + step
step = 1
start = 0
end = k
while end != len(seq.reverse_complement()) - 1:
kmers[str(seq.reverse_complement()[start: end])] += 1
start, end = start + step, end + step
kmers = normalize(kmers)
return kmers
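    # Illustrative sketch (defined but never called): how get_kmers_fereq could be
    # exercised on a toy Biopython sequence. The example sequence is an assumption
    # for demonstration only.
    def _demo_get_kmers_fereq():
        from Bio.Seq import Seq
        toy = Seq("AGTCAGTCAGTCAGTC")
        freqs = get_kmers_fereq(toy, k=2)
        # Frequencies over both strands, normalised to sum to 1.
        return sorted(freqs.items(), key=lambda kv: kv[1], reverse=True)[:5]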
def get_functionality(fungi, matrix_possitions, kofam_ontology, n_neigbors, epsilont=0.5):
"""
"""
neighbor = matrix_possitions.loc[fungi].sort_values()
neighbor = neighbor.drop(fungi)
neighbor = neighbor[neighbor <= epsilont]
if len(neighbor) > n_neigbors:
neighbor = neighbor[: n_neigbors]
if len(neighbor[neighbor == 0].index) > 0:
dict_of_methabolic_function = kofam_ontology[neighbor[neighbor == 0].index].mean(axis=1)
else:
dict_of_methabolic_function = kofam_ontology[neighbor.index].mean(axis=1)
return dict_of_methabolic_function
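    # Illustrative sketch (defined but never called): get_functionality on a tiny
    # synthetic distance matrix and ontology table. All names and values below are
    # made up for the demonstration.
    def _demo_get_functionality():
        toy_dist = DataFrame(
            [[0.0, 0.1, 0.9], [0.1, 0.0, 0.8], [0.9, 0.8, 0.0]],
            index=['query', 'fungus_a', 'fungus_b'],
            columns=['query', 'fungus_a', 'fungus_b'])
        toy_onto = DataFrame(
            {'fungus_a': [1.0, 0.0], 'fungus_b': [0.0, 1.0]},
            index=['KO1', 'KO2'])
        # Only fungus_a lies within epsilon of the query, so its profile is returned.
        return get_functionality('query', toy_dist, toy_onto, n_neigbors=2, epsilont=0.5)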
def get_matrix(its, *args):
"""
"""
Ortology_group, Meta_micom, non_pred = args
fungi_sample = its.id
marker_frequence = get_kmers_fereq(its.seq, k=5)
marker_frequence['Fungi'] = fungi_sample
base_subset= base.append(DataFrame(data=marker_frequence, index=[fungi_sample])[base.columns])
distance_matrix = squareform(pdist(base_subset.values, 'cosine'))
distance_matrix = DataFrame(data=distance_matrix, index=base_subset.index, columns=base_subset.index)
its_function = get_functionality(fungi_sample, distance_matrix, functionality, n_neigbors=K, epsilont=e)
if Ortology_group == []:
Ortology_group = list(its_function.keys())
predicted_vector = its_function.values
if str(predicted_vector[0]) == 'nan':
a_logger.debug(f'Functional for {its.id} was not predicted!\nTry to change -e ...')
non_pred += 1
Meta_micom[f'Fraction score {fungi_sample}'] = predicted_vector
return Ortology_group, Meta_micom, non_pred
# Start assay
if out is None:
out = './FunFun_output'
if os.path.exists(out):
shutil.rmtree(out)
os.mkdir(out)
    output_file_handler = logging.FileHandler(f"{out}/FunFun.log")
    stdout_handler = logging.StreamHandler(sys.stdout)
    a_logger.addHandler(output_file_handler)
    a_logger.addHandler(stdout_handler)
# Fasta availability check
if ITS1 is None and ITS2 is None and CONCAT is None:
a_logger.debug('Give an ITS!')
sys.exit()
# Make realignment on base
if ITS1 is not None:
base = read_csv('./data/ITS1_base.tsv', sep='\t', index_col=[0])
marker_seq = ITS1
if ITS2 is not None:
base = read_csv('./data/ITS2_base.tsv', sep='\t', index_col=[0])
marker_seq = ITS2
if CONCAT is not None:
        base = read_csv('./data/CONCAT_base.tsv', sep='\t', index_col=[0])
import os
import shutil
import dash
import base64
import io
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon
import numpy as np
import dash_table
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
from dash.exceptions import PreventUpdate
from shapely.geometry import Point
from gisele.functions import load, sizing
from gisele import initialization, clustering, processing, collecting, \
optimization, results, grid, branches
import pyutilib.subprocess.GlobalData
pyutilib.subprocess.GlobalData.DEFINE_SIGNAL_HANDLERS_DEFAULT = False
# creation of all global variables used in the algorithm
gis_columns = pd.DataFrame(columns=['ID', 'X', 'Y', 'Population', 'Elevation',
'Weight'])
mg_columns = pd.DataFrame(columns=['Cluster', 'PV [kW]', 'Wind [kW]',
'Diesel [kW]', 'BESS [kWh]',
'Inverter [kW]', 'Investment Cost [k€]',
'OM Cost [k€]', 'Replace Cost [k€]',
'Total Cost [k€]', 'Energy Produced [MWh]',
'Energy Demand [MWh]', 'LCOE [€/kWh]'])
npc_columns = pd.DataFrame(columns=['Cluster', 'Grid NPC [k€]', 'MG NPC [k€]',
'Grid Energy Consumption [MWh]',
'MG LCOE [€/kWh]',
'Grid LCOE [€/kWh]', 'Best Solution'])
# breaking load profile in half for proper showing in the data table
input_profile = pd.read_csv(r'Input/Load Profile.csv').round(4)
load_profile = pd.DataFrame(columns=['Hour 0-12', 'Power [p.u.]',
'Hour 12-24', 'Power (p.u.)'])
load_profile['Hour 0-12'] = pd.Series(np.arange(12)).astype(int)
load_profile['Hour 12-24'] = pd.Series(np.arange(12, 24)).astype(int)
load_profile['Power [p.u.]'] = input_profile.iloc[0:12, 0].values
load_profile['Power (p.u.)'] = input_profile.iloc[12:24, 0].values
lp_data = load_profile.to_dict('records')
# configuration file, eps and pts values separated by - since they are a range
config = pd.read_csv(r'Input/Configuration.csv')
# config.loc[21, 'Value'] = sorted(list(map(int,
# config.loc[21, 'Value'].split('-'))))
# config.loc[20, 'Value'] = sorted(list(map(int,
# config.loc[20, 'Value'].split('-'))))
# empty map to show as initual output
fig = go.Figure(go.Scattermapbox(
lat=[''],
lon=[''],
mode='markers'))
fig.update_layout(mapbox_style="carto-positron")
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
# study area definition
lon_point_list = [-65.07295675004093, -64.75263629329811, -64.73311537903679,
-65.06630189290638]
lat_point_list = [-17.880592240953966, -17.86581365258916, -18.065431449408248,
-18.07471050015602]
polygon_geom = Polygon(zip(lon_point_list, lat_point_list))
study_area = gpd.GeoDataFrame(index=[0], crs=4326,
geometry=[polygon_geom])
# initialization of the app
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.MINTY])
app.layout = html.Div([
html.Div([dbc.Row([
dbc.Col(
html.Img(
src=app.get_asset_url("poli_horizontal.png"),
id="polimi-logo",
style={"height": "80px", "width": "auto",
'textAlign': 'center'},
className='four columns'),
width={"size": 2, "offset": 1}, align='center'),
dbc.Col(
html.H1('GISEle: GIS for Electrification',
style={'textAlign': 'center', 'color': '000000'}
),
width={"size": 5, "offset": 2}, align='center')
], ),
]),
html.Div([dcc.Slider(id='step',
min=0,
max=5,
marks={0: 'Start',
1: 'GIS Data Processing', 2: 'Clustering',
3: 'Grid Routing', 4: 'Microgrid Sizing',
5: 'NPC Analysis'},
value=1,
)
], style={'textAlign': 'center', 'margin': '20px'}),
html.Div(id='start_div', children=[
dbc.Row([
dbc.Col(html.Div(style={'textAlign': 'center'}, children=[
html.H2(['Introduction'], style={'margin': '20px',
'color': "#55b298"}),
dbc.Card([
dbc.CardBody(["The GIS for electrification (GISEle) "
"is an open source Python-based tool that "
"uses GIS and terrain analysis to model the "
"area under study, groups loads using a "
"density-based "
"clustering algorithm called DBSCAN and "
"then it uses graph theory to find the "
"least-costly electric network topology "
"that can connect all the people in the "
"area."], style={'textAlign': 'justify'}),
dbc.CardFooter(
dbc.CardLink("Energy4Growing",
href="http://www.e4g.polimi.it/")),
], className="mb-3", style={}),
dbc.Row([
dbc.Col(
dbc.Button(
'NEXT',
size='lg',
color='primary',
id='next_step_start', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 6}, align='center'),
], justify='around'),
]),
width={"size": 3, "offset": 0}),
dbc.Col(html.Div([
dcc.Graph(
id='output_start',
figure=fig,
)
]), width={"size": 8, "offset": 0}, align='center'),
], justify="around"),
]),
html.Div(id='gis_div', children=[
dbc.Row([
dbc.Col(html.Div(style={'textAlign': 'center'}, children=[
html.H2(['GIS Data Analysis'], style={'color': "#55b298"}),
dbc.Checklist(id='import_data', switch=True, inline=True,
options=[
{'label': 'Download GIS Data',
'value': 'y'},
],
# value='y',
style={'margin': '15px'}
),
dbc.Collapse([
dbc.Label(['Study Area'], id='studyarea_label'),
dbc.Row([
dbc.Label('Y1'),
dbc.Col(
dbc.Input(
id='lat1',
debounce=True,
type='number',
value=-17.8805,
style={'margin': '1px'})
),
dbc.Label('X1'),
dbc.Col(
dbc.Input(
id='lon1',
debounce=True,
type='number',
value=-65.0729,
style={'margin': '1px'})
),
]),
dbc.Row([
dbc.Label('Y2'),
dbc.Col(
dbc.Input(
id='lat2',
debounce=True,
type='number',
value=-17.8658,
style={'margin': '1px'})
),
dbc.Label('X2'),
dbc.Col(
dbc.Input(
id='lon2',
debounce=True,
type='number',
value=-64.7526,
style={'margin': '1px'})
),
]),
dbc.Row([
dbc.Label('Y3'),
dbc.Col(
dbc.Input(
id='lat3',
debounce=True,
type='number',
value=-18.0654,
style={'margin': '1px'})
),
dbc.Label('X3'),
dbc.Col(
dbc.Input(
id='lon3',
debounce=True,
type='number',
value=-64.7331,
style={'margin': '1px'})
),
]),
dbc.Row([
dbc.Label('Y4'),
dbc.Col(
dbc.Input(
id='lat4',
debounce=True,
type='number',
value=-18.0747,
style={'margin': '1px'})
),
dbc.Label('X4'),
dbc.Col(
dbc.Input(
id='lon4',
debounce=True,
type='number',
value=-65.0663,
style={'margin': '1px'})
),
]),
dbc.Row([
dbc.Col(
dbc.Checklist(
options=[
{"label": "Import Population", "value": 1},
],
value=[1],
id="import_pop")
),
dbc.Col(
dcc.Upload(
id='upload_pop',
children=html.Div([
html.A('Select .csv File')
]),
style={
'width': '100%',
'height': '30px',
'lineHeight': '30px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px',
},
),
)
], align='center'),
], id='collapse_gis'),
dbc.Collapse([
dcc.Upload(
id='upload_csv',
children=html.Div([
html.A('Import points .csv file')
]),
style={
'width': '100%',
'height': '100px',
'lineHeight': '100px',
'borderWidth': '2px',
'borderStyle': 'dashed',
'borderRadius': '8px',
'textAlign': 'center',
'margin': '10px',
},
),
html.Div(id='upload_csv_out'),
], id='collapse_gis2'),
dbc.Row([
dbc.Col([
dbc.Label('CRS'),
dbc.Input(
id='crs',
placeholder='EPSG code..',
debounce=True,
type='number',
value=''
),
]),
dbc.Col([
dbc.Label('Resolution [meters]'),
dbc.Input(
id='resolution',
placeholder='1000m',
debounce=True,
type='number',
min=100, step=50,
value='1000'
),
])
], style={'margin': '10px'}),
html.Div([
dbc.Label('Landcover Dataset'),
dcc.Dropdown(
id='landcover_option',
options=[
{'label': 'Global Landcover GLC-2000',
'value': 'GLC'},
{'label': 'Copernicus CGLS-LC100',
'value': 'CGLS'},
{'label': 'ESACCI',
'value': 'ESACCI'},
{'label': 'Other',
'value': 'Other'},
],
value='GLC'
)
], style={'margin': '15px'}),
dbc.Row([
dbc.Col(
dbc.Button(
'RUN',
size='lg',
color='warning',
id='create_df', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 0}, align='center'),
dbc.Col(
dbc.Button(
'NEXT',
size='lg',
color='primary',
id='next_step_gis', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 0}, align='center'),
], justify='around'),
]),
width={"size": 3, "offset": 0}),
dbc.Col(html.Div([
dbc.Spinner(size='lg', color="#4ead84", children=[
dbc.Tabs([
dbc.Tab(label='Map', label_style={"color": "#55b298"},
children=[
dcc.Graph(
id='output_gis',
figure=fig,
)]
),
dbc.Tab(label='Table',
label_style={"color": "#55b298"},
children=[
dash_table.DataTable(
id='datatable_gis',
columns=[
{"name": i, "id": i} for i in
gis_columns.columns
],
style_table={'height': '450px'},
page_count=1,
page_current=0,
page_size=13,
page_action='custom')]
),
]),
])
]), width={"size": 8, "offset": 0}, align='center'),
], justify="around"),
]),
html.Div(id='cluster_div', children=[
dbc.Row([
dbc.Col(html.Div(style={'textAlign': 'center'}, children=[
html.H5(['Cluster Sensitivity'], style={'margin': '5px',
'color': "#55b298"},
id='cluster_sens_header'),
dbc.Label('Limits for the MINIMUM POINTS'),
dcc.RangeSlider(
id='pts',
allowCross=False,
min=10,
max=2000,
marks={10: '10', 200: '200', 400: '400', 600: '600',
800: '800', 1000: '1000', 1200: '1200',
1400: '1400,', 1600: '1600', 1800: '1800',
2000: '2000'},
step=10,
value=[300, 700]
),
dbc.Label('Limits for the NEIGHBOURHOOD [meters]'),
dcc.RangeSlider(
id='eps',
allowCross=False,
min=100,
max=5000,
marks={100: '100', 500: '500', 1000: '1000', 1500: '1500',
2000: '2000', 2500: '2500', 3000: '3000',
3500: '3500,', 4000: '4000', 4500: '4500',
5000: '5000'},
step=100,
value=[1200, 1700]
),
dbc.Row([
dbc.Col([
dbc.Label('Spans'),
dbc.Input(
debounce=True,
bs_size='sm',
id='spans',
placeholder='',
type='number',
min=0, max=20, step=1,
value='5'),
], width={"size": 4, "offset": 0}, align='center'),
dbc.Col([
dbc.Button(
'Sensitivity',
# size='sm',
color='warning',
id='bt_cluster_sens', n_clicks=0, disabled=False,
style={'textAlign': 'center', 'margin': '10px'},
className='button-primary'),
], width={"size": 6, "offset": 0}, align='end')
], justify="around"),
html.H5(['Cluster Analysis'], style={'margin': '5px',
'color': "#55b298"}),
dbc.Row([
dbc.Col([
dbc.Label('Final EPS'),
dbc.Input(
debounce=True,
bs_size='sm',
id='eps_final',
placeholder='',
type='number',
min=0, max=10000, step=1,
value=1500),
]),
dbc.Col([
dbc.Label('Final minPTS'),
dbc.Input(
debounce=True,
bs_size='sm',
id='pts_final',
placeholder='..',
type='number',
min=0, max=10000, step=1,
value=500),
])
]),
dbc.Collapse(id='collapse_merge', children=[
html.H6(['Choose two clusters to merge'],
style={'textAlign': 'center',
'color': "#55b298",
'margin': '5px'}),
dbc.Row([
dbc.Col([
# dbc.Label('Cluster to merge'),
dbc.Input(
debounce=True,
bs_size='sm',
id='c1_merge',
placeholder='',
type='number',
min=0, max=99, step=1,
value=''),
], width={"size": 3, "offset": 0}, align='center'),
dbc.Col([
# dbc.Label('Cluster'),
dbc.Input(
debounce=True,
bs_size='sm',
id='c2_merge',
placeholder='',
type='number',
min=0, max=99, step=1,
value=''),
], width={"size": 3, "offset": 0}, align='center'),
dbc.Col([
dbc.Button(
'Merge',
# size='sm',
color='warning',
id='bt_merge_clusters', n_clicks=0,
disabled=False,
style={'textAlign': 'center',
'margin': '0px'},
className='button-primary'),
], width={"size": 4, "offset": 0}, align='center')
], justify="around", style={'height': -100}),
]),
dbc.Row([
dbc.Col(
dbc.Button(
'RUN',
size='lg',
color='warning',
id='cluster_analysis', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 0}, align='center'),
dbc.Col(
dbc.Button(
'NEXT',
size='lg',
color='primary',
id='next_step_cluster', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 0}, align='center'),
], justify='around'),
]), width={"size": 3, "offset": 0}),
dbc.Col(html.Div([
dbc.Spinner(size='lg', color="#55b298", children=[
dbc.Tabs([
dbc.Tab(label='Map', label_style={"color": "#55b298"},
children=[
dcc.Graph(
id='output_cluster',
figure=fig,
)]
),
dbc.Tab(label='Sensitivity',
label_style={"color": "#55b298"}, children=[
dcc.Graph(
id='output_sens',
figure=fig,
)]
),
]),
])
]), width={"size": 8, "offset": 0}, align='center'),
], justify="around"),
]),
html.Div(id='grid_div', children=[
dbc.Row([
dbc.Col(html.Div(style={'textAlign': 'center'}, children=[
html.H2(['Grid Routing'], style={'color': "#55b298"}),
dbc.Checklist(id='full_ele', switch=True, inline=True,
options=[
{'label': 'Total Electrification',
'value': 'y'},
],
value=[],
style={'textAlign': 'center', 'margin': '15px'},
),
dbc.Row([
dbc.Col([
dbc.Label('Population Threshold'),
dbc.Input(
id='pop_thresh',
placeholder='',
debounce=True,
type='number',
min=0, max=1000, step=1,
value='100')
]),
dbc.Col([
dbc.Label('Line Base Cost'),
dbc.Input(
id='line_bc',
placeholder='[€/km]',
debounce=True,
type='number',
value='')
]),
dbc.Col([
dbc.Label('Load per Capita'),
dbc.Input(
id='pop_load',
placeholder='[kW]',
debounce=True,
type='number',
value='')
])
]),
# dbc.Row([
# dbc.Col([
# # dbc.Label(['HV/MV Substation cost'], style={'textSize': '1px'}),
# html.Div(['HV/MV Substation cost'],
# style={'font-size ': '1px'}),
# dbc.Input(
# id='sub_cost_HV',
# placeholder='Enter a value [€]..',
# debounce=True,
# min=0, max=999999999,
# type='number',
# value=''
# ),
# ]),
# dbc.Col([
# dbc.Label('MV/LV Substation cost'),
# dbc.Input(
# id='sub_cost_MV',
# placeholder='Enter a value [€]..',
# debounce=True,
# min=0, max=999999999,
# type='number',
# value=''
# ),
# ])
# ]),
dbc.Checklist(id='branch', switch=True, inline=True,
options=[
{'label': 'Branch Strategy',
'value': 'y'},
],
value=[],
style={'margin': '15px'}
),
dbc.Collapse(id='collapse_branch', children=[
dbc.Row([
dbc.Col([
dbc.Label(
'Population Treshold (Main Branches)'),
dbc.Input(
debounce=True,
id='pop_thresh_lr',
type='number',
min=0, max=1000, step=10,
value='200'),
]),
dbc.Col([
dbc.Label(
'Line base cost (Collaterals)'),
dbc.Input(
debounce=True,
id='line_bc_col',
placeholder='[€/km]',
type='number',
value='')
]),
]),
]),
dbc.Row([
dbc.Col(
dbc.Button(
'RUN',
size='lg',
color='warning',
id='grid_routing', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 0}, align='center'),
dbc.Col(
dbc.Button(
'NEXT',
size='lg',
color='primary',
id='next_step_routing', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 0}, align='center'),
], justify='around'),
]),
width={"size": 3, "offset": 0}, align='center'),
dbc.Col(html.Div([
dbc.Spinner(size='lg', color="#55b298", children=[
dbc.Tabs([
dbc.Tab(label='Map', label_style={"color": "#55b298"},
children=[
dcc.Graph(
id='output_grid',
figure=fig,
)]
),
dbc.Tab(label='Table',
label_style={"color": "#55b298"},
children=[
dash_table.DataTable(
id='datatable_grid',
columns=[],
style_header={
'whiteSpace': 'normal',
'height': 'auto',
'width': '30px',
'textAlign': 'center'},
style_table={'height': '450px'},
page_count=1,
page_current=0,
page_size=13,
page_action='custom',
sort_action='custom',
sort_mode='single',
sort_by=[])]
),
]),
])
]), width={"size": 8, "offset": 0}, align='center'),
], justify="around"),
]),
html.Div(id='mg_div', children=[
dbc.Row([
dbc.Col(html.Div(style={'textAlign': 'center'}, children=[
html.H2(['Microgrid Sizing'], style={'color': "#55b298"}),
dbc.Checklist(id='import_res', switch=True, inline=True,
options=[
{'label': 'Download RES data',
'value': 'y'},
],
value='y',
style={'textAlign': 'center', 'margin': '15px'},
),
dbc.Row([
dbc.Col([
dbc.Label('Upload a Daily Load Profile'),
dcc.Upload(
id='upload_loadprofile',
children=html.Div([
'Drag and Drop or ',
html.A('Select File')
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
),
])
]),
html.Div([
dbc.Label('Wind Turbine Model'),
dcc.Dropdown(
id='wt',
options=[
{'label': 'Nordex N27 150',
'value': 'Nordex N27 150'},
{'label': 'Alstom Eco 80',
'value': 'Alstom Eco 80'},
{'label': 'Enercon E40 500',
'value': 'Enercon E40 500'},
{'label': 'Vestas V27 225',
'value': 'Vestas V27 225'}
],
value='Nordex N27 150'
)
], style={'margin': '15px'}),
dbc.Row([
dbc.Col(
dbc.Button(
'RUN',
size='lg',
color='warning',
id='mg_sizing', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 0}, align='center'),
dbc.Col(
dbc.Button(
'NEXT',
size='lg',
color='primary',
id='next_step_mg', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 0}, align='center'),
], justify='around'),
]),
width={"size": 3, "offset": 0}, align='center'),
dbc.Col(html.Div([
dbc.Spinner(size='lg', color="#55b298", children=[
dbc.Tabs([
dbc.Tab(label='Map', label_style={"color": "#55b298"},
children=[
dcc.Graph(
id='output_mg',
figure=fig,
)]
),
dbc.Tab(label='Microgrids',
label_style={"color": "#55b298"},
children=[
dash_table.DataTable(
id='datatable_mg',
columns=[
{"name": i, "id": i} for i in
mg_columns.columns
],
style_header={
'whiteSpace': 'normal',
'height': 'auto',
'width': '30px',
'textAlign': 'center'
},
style_table={'height': '450px'},
page_count=1,
page_current=0,
page_size=13,
page_action='custom',
sort_action='custom',
sort_mode='single',
sort_by=[])
]),
dbc.Tab(label='Load Profile',
label_style={"color": "#55b298"},
children=[
dbc.Row([
dbc.Col([
dash_table.DataTable(
id='datatable_load_profile',
columns=[{"name": i, "id": i}
for i in
load_profile.columns],
data=lp_data,
editable=True,
style_header={
'whiteSpace': 'normal',
'height': 'auto',
'width': '30px',
'textAlign': 'center'
},
style_table={
'height': '450px'},
page_count=1,
page_current=0,
page_size=13,
page_action='custom'),
], width={"size": 5, "offset": 0},
align='center'),
dbc.Col([
dcc.Graph(id='load_profile_graph')
], width={"size": 7, "offset": 0},
align='center'),
])
]),
]),
])
]), width={"size": 8, "offset": 0}, align='center'),
], justify="around"),
]),
html.Div(id='npc_div', children=[
dbc.Row([
dbc.Col(html.Div(style={'textAlign': 'center'}, children=[
html.H2(['NPC Analysis'], style={'color': "#55b298"}),
dbc.Row([
dbc.Col([
dbc.Label('Cost of Electricity'),
dbc.Input(
id='coe',
placeholder='[€/kWh]',
debounce=True,
min=0, max=9999,step=0.01,
type='number',
value=''
),
]),
dbc.Col([
dbc.Label('Inflation Rate'),
dbc.Input(
id='grid_ir',
placeholder='[%/year]',
debounce=True,
min=0, max=1, step=0.01,
type='number',
value='0.01'
),
])
]),
dbc.Row([
dbc.Col([
dbc.Label('Grid Lifetime'),
dbc.Input(
id='grid_lifetime',
placeholder='[y]',
debounce=True,
min=1, max=100, step=1,
type='number',
value='40'
),
]),
dbc.Col([
dbc.Label('Grid O&M Costs'),
dbc.Input(
debounce=True,
id='grid_om',
placeholder='[% of total]',
min=0, max=1, step=0.01,
type='number',
value='0.01'
),
])
]),
dbc.Row([
dbc.Col([
dbc.Label('Max Power along lines'),
dbc.Input(
id='p_max_lines',
placeholder='[kW]',
debounce=True,
min=0, max=9999,step=0.01,
type='number',
value=''
),
]),
]),
dcc.Upload(
id='upload_subs',
children=html.Div([
html.A('Import substations .csv file')
]),
style={
'width': '100%',
'height': '30px',
'lineHeight': '30px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px',
},
),
html.Div(id='upload_subs_out'),
dbc.Row([
dbc.Col(
dbc.Button(
'RUN',
size='lg',
color='warning',
id='npc_btn', n_clicks=0,
style={'textAlign': 'center', 'margin': '10px'},
), width={"size": 6, "offset": 0}, align='center'),
], justify='start'),
]),
width={"size": 3, "offset": 0}, align='center'),
dbc.Col(html.Div([
dbc.Spinner(size='lg', color="#55b298", children=[
dbc.Tabs([
dbc.Tab(label='Map', label_style={"color": "#55b298"},
children=[
dcc.Graph(
id='output_npc',
figure=fig,
)]
),
dbc.Tab(label='Table',
label_style={"color": "#55b298"},
children=[
dash_table.DataTable(
id='datatable_grid_final',
columns=[]
,
style_header={
'whiteSpace': 'normal',
'height': 'auto',
'width': '30px',
'textAlign': 'center'
},
style_table={'height': '450px'},
page_count=1,
page_current=0,
page_size=13,
page_action='custom',
sort_action='custom',
sort_mode='single',
sort_by=[]
)
]
),
]),
])
]), width={"size": 8, "offset": 0}, align='center'),
], justify="around"),
]),
html.P(id='config_out', style={'display': 'none'}),
html.P(id='upload_pop_out', style={'display': 'none'}),
html.P(id='studyarea_out', style={'display': 'none'}),
dbc.Tooltip(
"WARNING: This option could greatly increases the computation time, ",
target="full_ele",
),
dbc.Tooltip(
"WARNING: This option could greatly increases the computation time, ",
target="branch",
),
dbc.Tooltip(
"Select a range of values for the input parameters and the number"
"of spans between this interval to show in the sensitivity graph."
"The goal should be to maximize the % of clustered people maintaining "
"a high value of people/km²."
,
target="cluster_sens_header",
),
dbc.Tooltip(
"Provide a set of four points, with coordinates in degrees (EPSG:4326)"
", which will form a rectangular polygon that limits the area under "
"analysis."
,
target="studyarea_label",
),
])
@app.callback(Output('step', 'value'),
[Input('next_step_start', 'n_clicks'),
Input('next_step_gis', 'n_clicks'),
Input('next_step_cluster', 'n_clicks'),
Input('next_step_routing', 'n_clicks'),
Input('next_step_mg', 'n_clicks')])
def go_next_step(next_step_start, next_step_gis, next_step_cluster,
next_step_routing, next_step_mg):
""" Changes the value of the step selection according to which NEXT button
that was pressed"""
button_pressed = dash.callback_context.triggered[0]['prop_id'].split('.')[
0]
if button_pressed == 'next_step_start':
return 1
if button_pressed == 'next_step_gis':
return 2
if button_pressed == 'next_step_cluster':
return 3
if button_pressed == 'next_step_routing':
return 4
if button_pressed == 'next_step_mg':
return 5
else:
return 0
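# Illustrative helper sketch (not wired into any callback): the pattern used above to find
# out which component fired a callback. The helper name is an assumption, not part of GISEle.
def _triggered_component_id():
    """Return the id of the component that triggered the current callback, or '' if none."""
    triggered = dash.callback_context.triggered
    if not triggered:
        return ''
    return triggered[0]['prop_id'].split('.')[0]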
@app.callback(Output('start_div', 'style'),
[Input('step', 'value')])
def change_interface(step):
""" Changes the html.Div of whole page according to the step selected """
if step == 0:
return {'textAlign': 'center'}
else:
return {'display': 'none'}
@app.callback(Output('gis_div', 'style'),
[Input('step', 'value')])
def change_interface(step):
""" Changes the html.Div of whole page according to the step selected """
if step == 1:
return {'textAlign': 'center'}
else:
return {'display': 'none'}
@app.callback(Output('cluster_div', 'style'),
[Input('step', 'value')])
def change_interface(step):
""" Changes the html.Div of whole page according to the step selected """
if step == 2:
return {'textAlign': 'center'}
else:
return {'display': 'none'}
@app.callback(Output('grid_div', 'style'),
[Input('step', 'value')])
def change_interface(step):
""" Changes the html.Div of whole page according to the step selected """
if step == 3:
return {'textAlign': 'center'}
else:
return {'display': 'none'}
@app.callback(Output('mg_div', 'style'),
[Input('step', 'value')])
def change_interface(step):
""" Changes the html.Div of whole page according to the step selected """
if step == 4:
return {'textAlign': 'center'}
else:
return {'display': 'none'}
@app.callback(Output('npc_div', 'style'),
[Input('step', 'value')])
def change_interface(step):
""" Changes the html.Div of whole page according to the step selected """
if step == 5:
return {'textAlign': 'center'}
else:
return {'display': 'none'}
@app.callback([Output('pop_thresh_lr', 'disabled'),
Output('line_bc_col', 'disabled')],
[Input('branch', 'value')])
def branch_options(branch):
""" Enables or not the options for branch technique according to switch"""
if not branch:
return True, True
else:
return False, False
@app.callback(Output("collapse_gis", "is_open"),
[Input("import_data", "value")])
def toggle_collapse_gis(value):
if isinstance(value, list):
if not value:
return False
elif value[0] == 'y':
return True
return False
@app.callback(Output("collapse_gis2", "is_open"),
[Input("import_data", "value")])
def toggle_collapse_gis(value):
if isinstance(value, list):
if not value:
return True
elif value[0] == 'y':
return False
return True
@app.callback(Output("collapse_merge", "is_open"),
[Input("cluster_analysis", "n_clicks")])
def toggle_collapse_merge(n_clicks):
if n_clicks >= 1:
return True
return False
@app.callback(Output("collapse_branch", "is_open"),
[Input("branch", "value")])
def toggle_collapse_merge(value):
if isinstance(value, list):
if not value:
return False
elif value[0] == 'y':
return True
return False
@app.callback(Output('upload_pop', 'disabled'),
[Input('import_pop', 'value')])
def disable_upload_pop(value):
if value:
return False
else:
return True
@app.callback(Output('upload_pop_out', 'children'),
[Input('upload_pop', 'contents')])
def read_upload_pop(contents):
if contents is not None:
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
df.to_csv(r'Input/imported_pop.csv', index=False)
return '_'
@app.callback(Output('upload_csv_out', 'children'),
[Input('upload_csv', 'contents')],
[State('upload_csv', 'filename')])
def read_upload_csv(contents, filename):
if dash.callback_context.triggered:
if contents is not None:
if not 'csv' in filename:
return html.P('The selected file is not a CSV table.')
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
df.to_csv(r'Input/imported_csv.csv', index=False)
return html.P('CSV file successfully imported.')
return html.P('No files selected.')
return html.P('No files selected.')
@app.callback(Output('upload_subs_out', 'children'),
[Input('upload_subs', 'contents')],
[State('upload_subs', 'filename')])
def read_upload_csv(contents, filename):
if dash.callback_context.triggered:
if contents is not None:
if not 'csv' in filename:
return html.P('The selected file is not a CSV table.')
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
df.to_csv(r'Input/imported_subs.csv', index=False)
return html.P('CSV file successfully imported.')
return html.P('No files selected.')
return html.P('No files selected.')
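# Illustrative helper sketch (not used by the callbacks above): the decode-and-read pattern
# they share, factored into one function. The helper name is an assumption.
def _parse_uploaded_csv(contents):
    """Decode a dcc.Upload 'contents' string into a pandas DataFrame."""
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    return pd.read_csv(io.StringIO(decoded.decode('utf-8')))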
@app.callback(Output('studyarea_out', 'children'),
[Input('lat1', 'value'), Input('lat2', 'value'),
Input('lat3', 'value'), Input('lat4', 'value'),
Input('lon1', 'value'), Input('lon2', 'value'),
Input('lon3', 'value'), Input('lon4', 'value')])
def create_study_area(lat1, lat2, lat3, lat4, lon1, lon2, lon3, lon4):
if dash.callback_context.triggered:
lat_points = [lat1, lat2, lat3, lat4]
lon_points = [lon1, lon2, lon3, lon4]
study_area.geometry = [Polygon(zip(lon_points, lat_points))]
study_area.to_file(r'Input/study_area.shp')
return '_'
@app.callback(Output('config_out', 'children'),
[Input('import_data', 'value'),
Input('crs', 'value'),
Input('resolution', 'value'),
Input('pop_thresh', 'value'),
Input('line_bc', 'value'),
Input('pop_load', 'value'),
Input('branch', 'value'),
Input('pop_thresh_lr', 'value'),
Input('line_bc_col', 'value'),
Input('full_ele', 'value'),
Input('wt', 'value'),
Input('coe', 'value'),
Input('grid_ir', 'value'),
Input('grid_om', 'value'),
Input('grid_lifetime', 'value'),
Input('eps', 'value'),
Input('pts', 'value'),
Input('spans', 'value'),
Input('eps_final', 'value'),
Input('pts_final', 'value'),
Input('c1_merge', 'value'),
Input('c2_merge', 'value'),
Input('landcover_option', 'value'),
Input('p_max_lines','value')])
def configuration(import_data, crs, resolution,
pop_thresh, line_bc, pop_load,
branch, pop_thresh_lr, line_bc_col, full_ele, wt, coe,
grid_ir, grid_om, grid_lifetime, eps, pts, spans, eps_final,
pts_final, c1_merge, c2_merge, landcover_option,p_max_lines):
""" Reads every change of input in the UI and updates the configuration
file accordingly """
ctx = dash.callback_context
if ctx.triggered[0]['value'] is not None:
para_index = config[config['Parameter'] ==
ctx.triggered[0]['prop_id'].split('.')[0]].index
if isinstance(ctx.triggered[0]['value'], list): #if this is just for eps and pts it is substituted by the lines after and can be removed
if not ctx.triggered[0]['value']:
config.loc[para_index, 'Value'] = 'no'
elif ctx.triggered[0]['value'][0] == 'y':
config.loc[para_index, 'Value'] = 'yes'
elif isinstance(ctx.triggered[0]['value'][0], int):
config.values[para_index[0], 1] = ctx.triggered[0]['value']
else:
config.loc[para_index, 'Value'] = ctx.triggered[0]['value']
        if ctx.triggered[0]['prop_id'] == 'eps.value':
            config.iloc[20, 1] = ctx.triggered[0]['value'][0]
            config.iloc[20, 2] = ctx.triggered[0]['value'][1]
        elif ctx.triggered[0]['prop_id'] == 'pts.value':
            config.iloc[21, 1] = ctx.triggered[0]['value'][0]
            config.iloc[21, 2] = ctx.triggered[0]['value'][1]
msg = 'Parameter changed: ' + str(
ctx.triggered[0]['prop_id'].split('.')[0])
print(config)
# todo -> update config csv file, need to check parameters are saved properly
config.to_csv('Input/Configuration.csv',index=False)
else:
raise PreventUpdate
return html.Div(msg)
@app.callback(
Output('datatable_gis', 'page_count'),
[Input('create_df', 'n_clicks')],
[State('import_pop', 'value')])
def create_dataframe(create_df, import_pop_value):
""" Runs the functions for creating the geodataframe when the button RUN
present in the GIS interface is pressed """
data_import = config.iloc[0, 1]
input_csv = 'imported_csv'
crs = int(config.iloc[3, 1])
resolution = float(config.iloc[4, 1])
landcover_option = (config.iloc[27, 1])
unit = 1
step = 1
if dash.callback_context.triggered[0]['prop_id'] == 'create_df.n_clicks':
##### remove all files from previous run ######
for file in os.listdir('Output'):
shutil.rmtree('Output/'+file, ignore_errors=True)
###create new directories ####
os.makedirs('Output/Datasets')
os.makedirs('Output/Clusters')
if data_import == 'yes':
collecting.data_gathering(crs, study_area)
landcover_option = 'CGLS'
if not import_pop_value:
df = processing.create_mesh(study_area, crs, resolution)
else:
imported_pop = pd.read_csv(r'Input/imported_pop.csv')
df = processing.create_mesh(study_area, crs, resolution,
imported_pop)
df_weighted = initialization.weighting(df, resolution,
landcover_option)
geo_df, pop_points = \
initialization.creating_geodataframe(df_weighted, crs,
unit, input_csv, step)
geo_df['Weight'] = geo_df['Weight'].astype('float') #sometimes it is saved as strings
geo_df.to_file(r"Output/Datasets/geo_df_json",
driver='GeoJSON')
else:
df = pd.read_csv(r'Input/' + input_csv + '.csv', sep=',')
print("Input files successfully imported.")
df_weighted = initialization.weighting(df, resolution,
landcover_option)
geo_df, pop_points = \
initialization.creating_geodataframe(df_weighted, crs,
unit, input_csv, step)
geo_df.to_file(r"Output/Datasets/geo_df_json", driver='GeoJSON')
initialization.roads_import(geo_df,crs)
geo_df = geo_df.to_crs(epsg=4326)
# fig2 = go.Figure(go.Scattermapbox(
#
# name='GeoDataFrame',
# lat=geo_df.geometry.y,
# lon=geo_df.geometry.x,
# mode='markers',
# marker=go.scattermapbox.Marker(
# size=10,
# showscale=True,
# color=geo_df.Population,
# opacity=0.8,
# colorbar=dict(title='People')
# ),
# text=list(
# zip(geo_df.ID, geo_df.Weight, geo_df.Population.round(1))),
# hoverinfo='text',
# below="''"
# ))
# fig2.update_layout(mapbox_style="carto-positron")
# fig2.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
# fig2.update_layout(mapbox_zoom=8.5,
# mapbox_center={"lat": geo_df.geometry.y[
# (int(geo_df.shape[0] / 2))],
# "lon": geo_df.geometry.x[
# (int(geo_df.shape[0] / 2))]})
return round(geo_df.shape[0] / 13, 0) + 1
else:
return 1
@app.callback(Output('output_sens', 'figure'),
[Input('bt_cluster_sens', 'n_clicks')])
def cluster_sensitivity(bt_cluster_sens):
""" Checks if the button SENSITIVITY was pressed, if yes run the code and
and changes the bt_out to a value that will call the change_interface
function """
if bt_cluster_sens > 0:
resolution = float(config.iloc[4, 1])
eps_1=float(config.iloc[20, 1])
eps_2 = float(config.iloc[20, 2])
pts_1=float(config.iloc[21, 1])
pts_2 = float(config.iloc[21, 2])
# eps = list(config.iloc[20, 1])
# pts = list(config.iloc[21, 1])
eps =list([eps_1,eps_2])
pts=list([pts_1,pts_2])
spans = int(config.iloc[22, 1])
geo_df = gpd.read_file(r"Output/Datasets/geo_df_json")
loc = {'x': geo_df['X'], 'y': geo_df['Y'], 'z': geo_df['Elevation']}
pop_points = pd.DataFrame(data=loc).values
fig_sens = clustering.sensitivity(resolution, pop_points, geo_df, eps,
pts,
int(spans))
return fig_sens
raise PreventUpdate
@app.callback(Output('output_cluster', 'figure'),
[Input('cluster_analysis', 'n_clicks'),
Input('bt_merge_clusters', 'n_clicks'),
Input('step' , 'value')])
def analysis(cluster_analysis, bt_merge_clusters,step):
""" Checks if the button RUN GISELE was pressed, if yes run the code and
and changes the bt_out to a value that will call the change_interface
function """
eps_final = int(config.iloc[23, 1])
pts_final = int(config.iloc[24, 1])
c1_merge = int(config.iloc[25, 1])
c2_merge = int(config.iloc[26, 1])
button_pressed = [p['prop_id'] for p in dash.callback_context.triggered][0]
pop_load = float(config.iloc[6, 1])
#todo -> improve this visualization, not loading the graph each time this page is entered
if 'cluster_analysis' in button_pressed:
for file in os.listdir('Output'):
if file!='Datasets' and file!='Clusters':
shutil.rmtree('Output/'+file, ignore_errors=True)
geo_df = gpd.read_file(r"Output/Datasets/geo_df_json")
loc = {'x': geo_df['X'], 'y': geo_df['Y'], 'z': geo_df['Elevation']}
pop_points = pd.DataFrame(data=loc).values
geo_df_clustered, clusters_list = \
clustering.analysis(pop_points, geo_df, pop_load,
eps_final, pts_final)
fig_clusters = clustering.plot_clusters(geo_df_clustered,
clusters_list)
clusters_list.to_csv(r"Output/Clusters/clusters_list.csv",
index=False)
geo_df_clustered.to_file(r"Output/Clusters/geo_df_clustered.json",
driver='GeoJSON')
return fig_clusters
elif 'bt_merge_clusters' in button_pressed:
geo_df_clustered = \
gpd.read_file(r"Output/Clusters/geo_df_clustered.json")
geo_df_clustered.loc[geo_df_clustered['Cluster'] ==
c2_merge, 'Cluster'] = c1_merge
clusters_list = pd.read_csv(r"Output/Clusters/clusters_list.csv")
drop_index = \
clusters_list.index[clusters_list['Cluster'] == c2_merge][0]
clusters_list = clusters_list.drop(index=drop_index)
fig_merged = clustering.plot_clusters(geo_df_clustered,
clusters_list)
clusters_list.to_csv(r"Output/Clusters/clusters_list.csv",
index=False)
geo_df_clustered.to_file(r"Output/Clusters/geo_df_clustered.json",
driver='GeoJSON')
return fig_merged
elif step ==2:
if os.path.isfile(r'Output/Clusters/geo_df_clustered.json') and os.path.isfile(r'Output/Clusters/clusters_list.csv'):
geo_df_clustered = \
gpd.read_file(r"Output/Clusters/geo_df_clustered.json")
clusters_list = pd.read_csv(r"Output/Clusters/clusters_list.csv")
fig_clusters = clustering.plot_clusters(geo_df_clustered,
clusters_list)
return fig_clusters
raise PreventUpdate
@app.callback(Output('output_grid', 'figure'),
[Input('grid_routing', 'n_clicks')])
def routing(grid_routing):
if grid_routing >= 1:
for file in os.listdir('Output'):
if file not in ('Datasets','Clusters','Grids','Branches'):
shutil.rmtree('Output/' + file, ignore_errors=True)
input_csv = 'imported_csv'
input_sub = 'imported_subs'
resolution = float(config.iloc[4, 1])
pop_load = float(config.iloc[6, 1])
pop_thresh = float(config.iloc[7, 1])
line_bc = float(config.iloc[8, 1])
sub_cost_hv = float(config.iloc[9, 1])
sub_cost_mv = float(config.iloc[10, 1])
branch = config.iloc[11, 1]
pop_thresh_lr = float(config.iloc[12, 1])
line_bc_col = float(config.iloc[13, 1])
full_ele = config.iloc[14, 1]
geo_df = gpd.read_file(r"Output/Datasets/geo_df_json")
geo_df_clustered = \
gpd.read_file(r"Output/Clusters/geo_df_clustered.json")
clusters_list = pd.read_csv(r"Output/Clusters/clusters_list.csv")
clusters_list.index = clusters_list.Cluster.values
if branch == 'no':
shutil.rmtree('Output/Grids', ignore_errors=True)
os.makedirs('Output/Grids')
grid_resume, gdf_roads, roads_segments = \
grid.routing(geo_df_clustered, geo_df, clusters_list,
resolution, pop_thresh, line_bc,
full_ele)
# grid_resume_opt = optimization.connections(geo_df, grid_resume,
# resolution, line_bc,
# branch, input_sub,
# gdf_roads,
# roads_segments)
fig_grid = results.graph(geo_df_clustered, clusters_list, branch,
grid_resume, pop_thresh,
full_ele)
elif branch == 'yes':
shutil.rmtree('Output/Branches', ignore_errors=True)
os.makedirs('Output/Branches')
gdf_lr = branches.reduce_resolution(input_csv, geo_df, resolution,
geo_df_clustered,
clusters_list)
grid_resume, substations, gdf_roads, roads_segments = \
branches.routing(geo_df_clustered, geo_df, clusters_list,
resolution, pop_thresh, input_sub, line_bc,
sub_cost_hv, sub_cost_mv, pop_load, gdf_lr,
pop_thresh_lr, line_bc_col, full_ele)
# grid_resume_opt = optimization.connections(geo_df, grid_resume,
# resolution, line_bc,
# branch, input_sub,
# gdf_roads,
# roads_segments)
fig_grid = results.graph(geo_df_clustered, clusters_list, branch,
grid_resume, pop_thresh,
full_ele)
return fig_grid
else:
return fig
@app.callback(Output('output_mg', 'figure'),
[Input('mg_sizing', 'n_clicks')])
def microgrid_size(mg_sizing):
grid_lifetime = int(config.iloc[19, 1])
wt = (config.iloc[15, 1])
if mg_sizing > 0:
for file in os.listdir('Output'):
if file not in ('Datasets','Clusters','Grids','Branches'):
shutil.rmtree('Output/' + file, ignore_errors=True)
os.makedirs('Output/Microgrids')
geo_df_clustered = \
gpd.read_file(r"Output/Clusters/geo_df_clustered.json")
clusters_list = pd.read_csv(r"Output/Clusters/clusters_list.csv")
clusters_list.index = clusters_list.Cluster.values
yearly_profile, years, total_energy = load(clusters_list,
grid_lifetime,
input_profile)
mg = sizing(yearly_profile, clusters_list, geo_df_clustered, wt, years)
fig_mg=results.graph_mg(mg,geo_df_clustered,clusters_list)
return fig_mg
else:
return fig
@app.callback(Output('output_npc', 'figure'),
[Input('npc_btn', 'n_clicks')])
def npc_computation(npc_btn):
global final_npc
branch = config.iloc[11, 1]
full_ele = config.iloc[14, 1]
input_sub = 'imported_subs'
pop_thresh = float(config.iloc[7, 1])
coe = float(config.iloc[16, 1])
p_max_lines =float(config.iloc[28, 1])
grid_om = float(config.iloc[18, 1])
grid_ir = float(config.iloc[17, 1])
grid_lifetime = int(config.iloc[19, 1])
resolution = float(config.iloc[4, 1])
line_bc = float(config.iloc[8, 1])
if npc_btn > 0:
for file in os.listdir('Output'):
if file not in ('Datasets','Clusters','Grids','Branches','Microgrids'):
shutil.rmtree('Output/' + file, ignore_errors=True)
os.makedirs('Output/NPC')
geo_df = gpd.read_file(r"Output/Datasets/geo_df_json")
geo_df_clustered = \
gpd.read_file(r"Output/Clusters/geo_df_clustered.json")
clusters_list = pd.read_csv(r"Output/Clusters/clusters_list.csv")
clusters_list.index = clusters_list.Cluster.values
substations = pd.read_csv(r'Input/' + input_sub + '.csv')
geometry = [Point(xy) for xy in
zip(substations['X'], substations['Y'])]
substations = gpd.GeoDataFrame(substations, geometry=geometry,
crs=geo_df.crs)
mg = pd.read_csv('Output/Microgrids/microgrids.csv')
mg.index = mg.Cluster.values
total_energy = pd.read_csv('Output/Microgrids/Grid_energy.csv')
total_energy.index = total_energy.Cluster.values
if branch == 'yes':
grid_resume = pd.read_csv(r'Output/Branches/grid_resume.csv')
grid_resume.index = grid_resume.Cluster.values
else:
grid_resume = pd.read_csv(r'Output/Grids/grid_resume.csv')
grid_resume.index = grid_resume.Cluster.values
# all_connections_opt = \
# gpd.read_file(r'Output/Grids/all_connections_opt.shp')
grid_resume_opt = \
optimization.milp_npc(geo_df_clustered, grid_resume,
substations, mg, total_energy, grid_om, coe,
grid_ir, grid_lifetime, branch, line_bc,
resolution,p_max_lines)
fig_grid = results.graph(geo_df_clustered, clusters_list, branch,
grid_resume_opt, pop_thresh,
full_ele, substations)
if branch=='yes':
file='Output/Branches/all_connections_opt'
if os.path.isfile(file+'.shp'):
results.line_break(file, fig_grid, 'black')
else:
file='Output/Grids/all_connections_opt'
if os.path.isfile(file+'.shp'):
results.line_break(file, fig_grid, 'black')
# else:
# file = 'Output/Grids/all_connections_opt'
# try:
# f = open(file + 'shp')
# results.line_break(file, fig_grid, 'black')
# except IOError:
# print("File not accessible")
# finally:
# f.close()
# final_lcoe = lcoe_analysis(clusters_list, total_energy,
# grid_resume_opt, mg, coe, grid_ir, grid_om,
# grid_lifetime)
return fig_grid
else:
return fig
@app.callback([Output('datatable_gis', 'data'),
Output('output_gis', 'figure')],
[Input('datatable_gis', "page_current"),
Input('datatable_gis', "page_size"),
Input('datatable_gis', "page_count")])
def update_table(page_current, page_size,page_count):
if os.path.isfile(r'Output/Datasets/geo_df_json'):
geo_df2 = pd.DataFrame(
gpd.read_file(r"Output/Datasets/geo_df_json").drop(
columns='geometry'))
geo_df=gpd.read_file(r"Output/Datasets/geo_df_json")
geo_df = geo_df.to_crs(epsg=4326)
fig2 = go.Figure(go.Scattermapbox(
name='GeoDataFrame',
lat=geo_df.geometry.y,
lon=geo_df.geometry.x,
mode='markers',
marker=go.scattermapbox.Marker(
size=10,
showscale=True,
color=geo_df.Population,
opacity=0.8,
colorbar=dict(title='People')
),
text=list(
zip(geo_df.ID, geo_df.Weight, geo_df.Population.round(1))),
hoverinfo='text',
below="''"
))
fig2.update_layout(mapbox_style="carto-positron")
fig2.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
fig2.update_layout(mapbox_zoom=8.5,
mapbox_center={"lat": geo_df.geometry.y[
(int(geo_df.shape[0] / 2))],
"lon": geo_df.geometry.x[
(int(geo_df.shape[0] / 2))]})
else:
geo_df2 = pd.DataFrame()
fig2 = dash.no_update
return geo_df2.iloc[
page_current * page_size:(page_current + 1) * page_size
].to_dict('records'), fig2
@app.callback([Output('datatable_grid', 'data'),
Output('datatable_grid', 'columns')],
[Input('datatable_grid', "page_current"),
Input('datatable_grid', "page_size"),
Input('datatable_grid', 'sort_by'),
Input('output_grid', "figure")],
[State('branch', 'value')])
def update_table(page_current, page_size, sort_by, output_grid, branches):
branch = config.iloc[11, 1]
geo_df2 = pd.DataFrame()
if branch == 'no':
if os.path.isfile(r'Output/Grids/grid_resume.csv'):
geo_df2 = pd.read_csv(r'Output/Grids/grid_resume.csv')
geo_df2 = geo_df2.round(2)
if len(sort_by):
geo_df2 = geo_df2.sort_values(
sort_by[0]['column_id'],
ascending=sort_by[0]['direction'] == 'asc',
inplace=False)
if branch == 'yes':
if os.path.isfile(r'Output/Branches/grid_resume.csv'):
geo_df2 = pd.read_csv(r'Output/Branches/grid_resume.csv')
geo_df2 = geo_df2.round(2)
if len(sort_by):
geo_df2 = geo_df2.sort_values(
sort_by[0]['column_id'],
ascending=sort_by[0]['direction'] == 'asc',
inplace=False)
geo_df2 = geo_df2.dropna(axis=1, how='all')
columns = [{"name": i, "id": i} for i in geo_df2.columns]
return geo_df2.iloc[
page_current * page_size:(page_current + 1) * page_size
].to_dict('records'), columns
@app.callback(Output('datatable_mg', 'data'),
[Input('datatable_mg', "page_current"),
Input('datatable_mg', "page_size"),
Input('datatable_mg', 'sort_by'),
Input('output_mg', "figure")])
def update_table(page_current, page_size, sort_by, output_mg):
if os.path.isfile(r'Output/Microgrids/microgrids.csv'):
geo_df2 = pd.read_csv(r'Output/Microgrids/microgrids.csv')
geo_df2 = geo_df2.round(2)
if len(sort_by):
geo_df2 = geo_df2.sort_values(
sort_by[0]['column_id'],
ascending=sort_by[0]['direction'] == 'asc',
inplace=False)
else:
geo_df2 = pd.DataFrame()
return geo_df2.iloc[
page_current * page_size:(page_current + 1) * page_size
].to_dict('records')
@app.callback(Output('load_profile_graph', 'figure'),
[Input('datatable_load_profile', 'data'),
Input('output_mg', "figure")])
def update_table(datatable_load_profile, output_mg):
    lp = pd.DataFrame(datatable_load_profile)
import pandas
from functools import reduce
from .. import SystemClass
from ..decorators import actions, mode
from typing import Iterable, Tuple, Callable
@mode
def run_static_pf(
distSys: SystemClass,
actions: Iterable[Callable] = (lambda distSys: None,),
tools: Iterable[Callable] = (lambda distSys: None,),
) -> tuple:
"""
Run the static power flow mode.
To see how it works, see [Learning DSSData](../tutorial/#static-power-flow).
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
actions: Actions functions.
tools: Tools functions.
Returns:
Tools functions returns
""" # noqa
[action(distSys) for action in actions]
distSys.run_command("set mode=Snap")
distSys.dss.Solution.Solve()
return tuple(tool(distSys) for tool in tools)
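# Illustrative usage sketch: run_static_pf with a user-defined tool (any callable that
# receives the system and returns a value). The feeder path and the SystemClass
# constructor arguments are assumptions for demonstration, not something this module defines.
def _demo_run_static_pf():
    from dssdata import SystemClass
    distSys = SystemClass(path="master.dss", kV=[13.8])  # assumed constructor signature
    losses_tool = lambda sys_: sys_.dss.Circuit.Losses()  # total circuit losses via OpenDSS
    (losses,) = run_static_pf(distSys, tools=[losses_tool])
    return losses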
@actions
def cfg_tspf(
distSys: SystemClass, step_size: str = "1h", initial_time: tuple = (0, 0)
) -> None:
"""
Set the time series mode in the distribution system.
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
step_size: The size of step time. See "Stepsize" in [OpenDSS User Manual](http://svn.code.sf.net/p/electricdss/code/trunk/Distrib/Doc/OpenDSSManual.pdf).
initial_time: See "Time" in [OpenDSS User Manual](http://svn.code.sf.net/p/electricdss/code/trunk/Distrib/Doc/OpenDSSManual.pdf).
""" # noqa
cmd = f"set mode=daily stepsize={step_size} "
cmd2 = f'time = "{initial_time[0]}, {initial_time[1]}"'
distSys.run_command(cmd + cmd2)
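# Illustrative usage sketch (not executed on import): cfg_tspf is typically wrapped so that
# it only takes the system object and then passed to run_tspf (defined below) as an action.
# The feeder path and SystemClass constructor arguments are assumptions for demonstration.
def _demo_run_tspf():
    from dssdata import SystemClass
    distSys = SystemClass(path="master.dss", kV=[13.8])  # assumed constructor signature
    quarter_hour_cfg = lambda sys_: cfg_tspf(sys_, step_size="15m")
    return run_tspf(distSys, num_steps=96, actions=[quarter_hour_cfg])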
@mode
def run_tspf(
distSys: SystemClass,
num_steps: int,
actions: Iterable[Callable] = (lambda distSys: None,),
tools: Iterable[Callable] = (lambda distSys: None,),
) -> Tuple[pandas.DataFrame]:
"""
Run the time series power flow.
To see how it works, see [Learning DSSData](../tutorial/#time-series-power-flow).
Args:
distSys: An instance of [SystemClass][dssdata.SystemClass]
num_steps : Number of time steps.
actions: Actions functions.
tools: Tools functions.
Returns:
Tools functions returns for all steps
""" # noqa
def concat_dfs(list_df1, list_df2):
"""
Concatena dois conjuntos de DF par a par. Ex:
list_df1 = [df1, df2, df3]
list_df2 = [df4, df5, df6]
return [pd.concat(df1,df4), pd.concat(df2,df5), pd.concat(df3,df6)]
Args:
list_df1 ([type]): [description]
list_df2 ([type]): [description]
Returns:
[type]: [description]
"""
return map(
lambda df1, df2: | pandas.concat([df1, df2], ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 16:22:57 2020
@author: Natalie
"""
import os
import sys
import click
import pickle
import pandas as pd
import numpy as np
import geopandas as gpd
import imageio
from shapely.geometry import Point
import json
from bokeh.io import output_file
from bokeh.plotting import figure, show
from bokeh.models import (BasicTicker, CDSView, ColorBar, ColumnDataSource,
CustomJS, CustomJSFilter, FactorRange,
GeoJSONDataSource, HoverTool, Legend,
LinearColorMapper, PrintfTickFormatter, Slider, Whisker)
from bokeh.layouts import row, column, gridplot, grid, widgetbox
from bokeh.models.widgets import Tabs, Panel
from bokeh.palettes import brewer
from bokeh.transform import transform, factor_cmap
import click # command-line interface
from yaml import load, dump, SafeLoader # pyyaml library for reading the parameters.yml file
from microsim.column_names import ColumnNames
# Functions for preprocessing
# ---------------------------
def calc_nr_days(data_file):
# figure out nr days by reading in e.g. retail dangers pickle file of run 0
pickle_in = open(data_file,"rb")
dangers = pickle.load(pickle_in)
pickle_in.close()
filter_col = [col for col in dangers if col.startswith(ColumnNames.LOCATION_DANGER)]
# don't use the column simply called 'Danger'
filter_col = filter_col[1:len(filter_col)]
nr_days = len(filter_col)
return nr_days
def create_venue_dangers_dict(locations_dict,r_range,data_dir,start_day,end_day,start_run,nr_runs):
'''
Reads in venue pickle files (venues from locations_dict) and populates dangers_dict_3d (raw data: venue, day, run), dangers_dict (mean across runs) and dangers_dict_std (standard deviation across runs)
Possible output includes:
dangers_dict # mean (value to be plotted)
dangers_dict_std # standard deviation (could plot as error bars)
dangers_dict_3d # full 3D data (for debugging)
'''
dangers_dict = {}
dangers_dict_std = {}
dangers_dict_3d = {}
for key, value in locations_dict.items():
#for r in range(nr_runs):
for r in r_range:
data_file = os.path.join(data_dir, f"{r}",f"{locations_dict[key]}.pickle")
pickle_in = open(data_file,"rb")
dangers = pickle.load(pickle_in)
pickle_in.close()
filter_col = [col for col in dangers if col.startswith('Danger')]
# don't use the column simply called 'Danger'
filter_col = filter_col[1:len(filter_col)]
#nr_days = len(filter_col)
# # set row index to ID
# dangers.set_index('ID', inplace = True)
dangers_colnames = filter_col[start_day:end_day+1]
dangers_rownames = dangers.index
dangers_values = dangers[filter_col[start_day:end_day+1]]
if r == start_run:
dangers_3d = np.zeros((dangers.shape[0],dangers_values.shape[1],nr_runs))
dangers_3d[:,:,r-start_run] = dangers_values
dangers_dict_3d[key] = dangers_3d
dangers_dict[key] = pd.DataFrame(data=dangers_3d.mean(axis=2), index=dangers_rownames, columns=dangers_colnames)
dangers_dict_std[key] = pd.DataFrame(data=dangers_3d.std(axis=2), index=dangers_rownames, columns=dangers_colnames)
return dangers_dict, dangers_dict_std, dangers_dict_3d
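# Illustrative call (hedged; the venue keys, directory layout and run/day ranges
# below are assumptions): read runs 0-4 for days 0-99 and return the per-venue
# mean, standard deviation and raw 3D danger scores.
#
#   locations_dict = {"Retail": "Retail", "PrimarySchool": "PrimarySchool"}
#   dangers_dict, dangers_dict_std, dangers_dict_3d = create_venue_dangers_dict(
#       locations_dict, range(0, 5), "output", start_day=0, end_day=99,
#       start_run=0, nr_runs=5)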
def create_difference_dict(dict_sc0,dict_sc1,lookup_dict):
dict_out = {}
for key, value in lookup_dict.items():
dict_out[key] = dict_sc1[key].subtract(dict_sc0[key])
return dict_out
def create_msoa_dangers_dict(dangers_dict,keys,msoa_codes):
'''
Converts dangers_dict to MSOA level data for the appropriate venue types. Produces average danger score (sum dangers in MSOA / total nr venues in MSOA)
Output: dangers_msoa_dict
'''
dangers_msoa_dict = {}
for k in range(0,len(keys)):
dangers = dangers_dict[keys[k]]
msoa_code = msoa_codes[k]
dangers['MSOA'] = msoa_code
# count nr for this condition per area
msoa_sum = dangers.groupby(['MSOA']).agg('sum')
msoa_count = dangers.groupby(['MSOA']).agg('count')
msoa_avg = msoa_sum.div(msoa_count, axis='index')
dangers_msoa_dict[keys[k]] = msoa_avg
return dangers_msoa_dict
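# Hedged example: aggregate the retail danger scores to MSOA level, where each
# entry of msoa_codes holds, for the matching key, the MSOA code of every venue
# row in that DataFrame (the names below are illustrative).
#
#   dangers_msoa_dict = create_msoa_dangers_dict(
#       dangers_dict, keys=["Retail"], msoa_codes=[retail_msoa_codes])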
def create_counts_dict(conditions_dict,r_range,data_dir,start_day,end_day,start_run,nr_runs,age_cat):
'''
Counts per condition (3D, mean and standard deviation)
Produces 5 types of counts:
msoacounts: nr per msoa and day
agecounts: nr per age category and day
totalcounts: nr per day (across all areas)
cumcounts: nr per MSOA and day
    uniquecounts: nr with 'final' disease status across time period e.g. someone who is presymptomatic, symptomatic and recovered is only counted once as recovered
Output:
msoas # list of msoas
totalcounts_dict, cumcounts_dict, agecounts_dict, msoacounts_dict, cumcounts_dict_3d, totalcounts_dict_std, cumcounts_dict_std, agecounts_dict_std, msoacounts_dict_std, totalcounts_dict_3d, agecounts_dict_3d, msoacounts_dict_3d, uniquecounts_dict_3d, uniquecounts_dict_std, uniquecounts_dict
'''
# start with empty dictionaries
msoas = []
msoacounts_dict_3d = {}
totalcounts_dict_3d = {}
cumcounts_dict_3d = {}
agecounts_dict_3d = {}
uniquecounts_dict_3d = {}
msoacounts_dict = {}
agecounts_dict = {}
totalcounts_dict = {}
cumcounts_dict = {}
uniquecounts_dict = {}
msoacounts_dict_std = {}
agecounts_dict_std = {}
totalcounts_dict_std = {}
cumcounts_dict_std = {}
uniquecounts_dict_std = {}
nr_days = end_day - start_day + 1
dict_days = [] # empty list for column names 'Day0' etc
for d in range(start_day, end_day+1):
dict_days.append(f'Day{d}')
age_cat_str = []
for a in range(age_cat.shape[0]):
age_cat_str.append(f"{age_cat[a,0]}-{age_cat[a,1]}")
# first, create 3d dictionaries
for r in r_range:
# read in pickle file individuals (disease status)
data_file = os.path.join(data_dir, f"{r}", "Individuals.pickle")
pickle_in = open(data_file,"rb")
individuals_tmp = pickle.load(pickle_in)
pickle_in.close()
# if first ever run, keep copy and initialise 3D frame for aggregating
if r == start_run:
individuals = individuals_tmp.copy()
msoas.extend(sorted(individuals.area.unique())) # populate list of msoas (previously empty outside this function)
area_individuals = individuals['area'] # keep area per person to use later
# next bit of code is to restrict to user specified day range
# first, find all columns starting with disease_status
filter_col = [col for col in individuals if col.startswith('disease_status')]
# don't use the column simply called 'disease_status'
filter_col = filter_col[1:len(filter_col)]
counts_colnames = filter_col[start_day:end_day+1]
# User defined age brackets
individuals.insert(7, 'Age0', np.zeros((len(individuals),1)))
for a in range(age_cat.shape[0]):
individuals['Age0'] = np.where((individuals['age'] >= age_cat[a,0]) & (individuals['age'] <= age_cat[a,1]), a+1, individuals['Age0'])
age_cat_col = individuals['Age0'].values
# temporary workaround if no continuous age
#age_cat_col = individuals['Age1'].values
# add age brackets column to individuals_tmp
individuals_tmp.insert(7, 'Age0', age_cat_col)
uniquecounts_df = pd.DataFrame()
# select right columns
subset = individuals_tmp[counts_colnames]
for key, value in conditions_dict.items():
#print(key)
if r == start_run:
msoacounts_dict_3d[key] = np.zeros((len(msoas),nr_days,nr_runs))
cumcounts_dict_3d[key] = np.zeros((len(msoas),nr_days,nr_runs))
agecounts_dict_3d[key] = np.zeros((age_cat.shape[0],nr_days,nr_runs))
totalcounts_dict_3d[key] = np.zeros((nr_days,nr_runs))
uniquecounts_dict_3d[key] = np.zeros(nr_runs)
# find all rows with condition (dict value)
indices = subset[subset.eq(value).any(1)].index
# create new df of zeros and replace with 1 at indices
cumcounts_end = pd.DataFrame(np.zeros((subset.shape[0], 1)))
cumcounts_end.loc[indices] = 1
uniquecounts_df[key] = cumcounts_end.values[:,0]
# loop aroud days
msoacounts_run = np.zeros((len(msoas),nr_days))
cumcounts_run = np.zeros((len(msoas),nr_days))
agecounts_run = np.zeros((age_cat.shape[0],nr_days))
for day in range(0, nr_days):
#print(day)
# count nr for this condition per area
msoa_count_temp = individuals_tmp[subset.iloc[:,day] == conditions_dict[key]].groupby(['area']).agg({subset.columns[day]: ['count']})
if msoa_count_temp.shape[0] == len(msoas):
msoa_count_temp = msoa_count_temp.values
msoacounts_run[:,day] = msoa_count_temp[:, 0]
elif msoa_count_temp.empty == False:
#print('check MSOAs')
# in case some entries don't exist
# start with empty dataframe
tmp_df = pd.DataFrame(np.zeros(len(msoas)), columns = ['tmp'], index=msoas)
# drop multiindex to prevent warning msg
msoa_count_temp.columns = msoa_count_temp.columns.droplevel(0)
# merge with obtained counts - NaN will appear
tmp_df = pd.merge(tmp_df, msoa_count_temp, how='left', left_index=True,right_index=True)
# replace NaN by 0
tmp_df = tmp_df.fillna(0)
msoacounts_run[:,day] = tmp_df.iloc[:,1].values
# cumulative counts
# select right columns
tmp_cum = subset.iloc[:,0:day+1]
indices = tmp_cum[tmp_cum.eq(value).any(1)].index
# create new df of zeros and replace with 1 at indices
tmp_df = pd.DataFrame(np.zeros((tmp_cum.shape[0], 1)))
tmp_df.loc[indices] = 1
# merge with MSOA df
tmp_df = tmp_df.merge(area_individuals, left_index=True, right_index=True)
cumcounts_tmp = tmp_df.groupby(['area']).sum()
if cumcounts_tmp.shape[0] == len(msoas):
cumcounts_tmp = cumcounts_tmp.values
cumcounts_run[:,day] = cumcounts_tmp[:, 0]
elif cumcounts_tmp.empty == False:
#print('check MSOAs')
# in case some entries don't exist
# start with empty dataframe
tmp_df = pd.DataFrame(np.zeros(len(msoas)), columns = ['tmp'], index=msoas)
# drop multiindex to prevent warning msg
cumcounts_tmp.columns = cumcounts_tmp.columns.droplevel(0)
# merge with obtained counts - NaN will appear
tmp_df = pd.merge(tmp_df, cumcounts_tmp, how='left', left_index=True,right_index=True)
# replace NaN by 0
tmp_df = tmp_df.fillna(0)
cumcounts_run[:,day] = tmp_df.iloc[:,1].values
# count nr for this condition per age bracket
age_count_temp = individuals_tmp[subset.iloc[:,day] == conditions_dict[key]].groupby(['Age0']).agg({subset.columns[day]: ['count']})
if age_count_temp.shape[0] == 6:
age_count_temp = age_count_temp.values
agecounts_run[:,day] = age_count_temp[:, 0]
elif age_count_temp.empty == False:
# in case some entries don't exist
# start with empty dataframe
tmp_df = pd.DataFrame(np.zeros(age_cat.shape[0]), columns = ['tmp'], index=list(range(1,age_cat.shape[0]+1)))
# drop multilevel index to prevent warning msg
age_count_temp.columns = age_count_temp.columns.droplevel(0)
# merge with obtained counts - NaN will appear
                    tmp_df = pd.merge(tmp_df, age_count_temp, how='left', left_index=True, right_index=True)
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__maintainer__ = __author__
import pytest
import numpy as np
import pandas as pd
from pylife.core.broadcaster import Broadcaster
foo_bar_series = pd.Series({'foo': 1.0, 'bar': 2.0})
foo_bar_series_twice_in_frame = pd.DataFrame([foo_bar_series, foo_bar_series])
series_named_index = foo_bar_series.copy()
series_named_index.index.name = 'idx1'
foo_bar_frame = pd.DataFrame({'foo': [1.0, 1.5], 'bar': [2.0, 1.5]})
def test_broadcast_series_to_array():
param, obj = Broadcaster(foo_bar_series).broadcast([1.0, 2.0])
pd.testing.assert_series_equal(param, pd.Series([1.0, 2.0]))
pd.testing.assert_frame_equal(foo_bar_series_twice_in_frame, obj)
def test_broadcast_frame_to_array_match():
param, obj = Broadcaster(foo_bar_frame).broadcast([1.0, 2.0])
np.testing.assert_array_equal(param, [1.0, 2.0])
pd.testing.assert_frame_equal(foo_bar_frame, obj)
def test_broadcast_frame_to_array_mismatch():
with pytest.raises(ValueError, match=r"Dimension mismatch. "
"Cannot map 3 value array-like to a 2 element DataFrame signal."):
Broadcaster(foo_bar_frame).broadcast([1.0, 2.0, 3.0])
def test_broadcast_series_to_scalar():
param, obj = Broadcaster(foo_bar_series).broadcast(1.0)
assert param == 1.0
pd.testing.assert_series_equal(foo_bar_series, obj)
def test_broadcast_frame_to_scalar():
param, obj = Broadcaster(foo_bar_frame).broadcast(1.0)
expected_param = pd.Series([1.0, 1.0], index=foo_bar_frame.index)
pd.testing.assert_series_equal(expected_param, param)
pd.testing.assert_frame_equal(foo_bar_frame, obj)
def test_broadcast_series_index_named_to_series_index_named():
series = pd.Series([5.0, 6.0], index=pd.Index(['x', 'y'], name='idx2'))
param, obj = Broadcaster(series_named_index).broadcast(series)
expected_param = pd.Series({
('foo', 'x'): 5.0,
('foo', 'y'): 6.0,
('bar', 'x'): 5.0,
('bar', 'y'): 6.0
})
expected_obj = pd.Series({
('foo', 'x'): 1.0,
('foo', 'y'): 1.0,
('bar', 'x'): 2.0,
('bar', 'y'): 2.0
})
expected_obj.index.names = ['idx1', 'idx2']
expected_param.index.names = ['idx1', 'idx2']
pd.testing.assert_series_equal(expected_param, param)
pd.testing.assert_series_equal(expected_obj, obj)
def test_broadcast_series_index_named_to_series_index_none():
series = pd.Series([5.0, 6.0], index=pd.Index([3, 4]))
param, obj = Broadcaster(series_named_index).broadcast(series)
expected_param = pd.Series({
('foo', 3): 5.0,
('foo', 4): 6.0,
('bar', 3): 5.0,
('bar', 4): 6.0
})
expected_obj = pd.Series({
('foo', 3): 1.0,
('foo', 4): 1.0,
('bar', 3): 2.0,
('bar', 4): 2.0
})
expected_obj.index.names = ['idx1', None]
expected_param.index.names = ['idx1', None]
pd.testing.assert_series_equal(expected_param, param)
    pd.testing.assert_series_equal(expected_obj, obj)
from __future__ import division
import os
import os.path as op
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nibabel as nib
from nipype import (Workflow, Node, MapNode, JoinNode,
IdentityInterface, DataSink)
from nipype.interfaces.base import traits, TraitedSpec
from nipype.interfaces import fsl, freesurfer as fs
from .. import signals
from ..utils import LymanInterface, SaveInfo
from ..visualizations import Mosaic, CarpetPlot
def define_preproc_workflow(info, subjects, sessions, qc=True):
# --- Workflow parameterization and data input
scan_info = info.scan_info
experiment = info.experiment_name
iterables = generate_iterables(scan_info, experiment, subjects, sessions)
subject_iterables, session_iterables, run_iterables = iterables
subject_iterables = subjects
subject_source = Node(IdentityInterface(["subject"]),
name="subject_source",
iterables=("subject", subject_iterables))
session_source = Node(IdentityInterface(["subject", "session"]),
name="session_source",
itersource=("subject_source", "subject"),
iterables=("session", session_iterables))
run_source = Node(IdentityInterface(["subject", "session", "run"]),
name="run_source",
itersource=("session_source", "session"),
iterables=("run", run_iterables))
session_input = Node(SessionInput(data_dir=info.data_dir,
proc_dir=info.proc_dir,
fm_template=info.fm_template,
phase_encoding=info.phase_encoding),
"session_input")
run_input = Node(RunInput(experiment=experiment,
data_dir=info.data_dir,
proc_dir=info.proc_dir,
sb_template=info.sb_template,
ts_template=info.ts_template,
crop_frames=info.crop_frames),
name="run_input")
# --- Warpfield estimation using topup
# Distortion warpfield estimation
# TODO figure out how to parameterize for testing
# topup_config = op.realpath(op.join(__file__, "../../../topup_fast.cnf"))
topup_config = "b02b0.cnf"
estimate_distortions = Node(fsl.TOPUP(config=topup_config),
"estimate_distortions")
# Post-process the TOPUP outputs
finalize_unwarping = Node(FinalizeUnwarping(), "finalize_unwarping")
# --- Registration of SE-EPI (without distortions) to Freesurfer anatomy
fm2anat = Node(fs.BBRegister(init="fsl",
contrast_type="t2",
registered_file=True,
out_fsl_file="sess2anat.mat",
out_reg_file="sess2anat.dat"),
"fm2anat")
fm2anat_qc = Node(AnatRegReport(data_dir=info.data_dir), "fm2anat_qc")
# --- Registration of SBRef to SE-EPI (with distortions)
sb2fm = Node(fsl.FLIRT(dof=6, interp="spline"), "sb2fm")
sb2fm_qc = Node(CoregGIF(out_file="coreg.gif"), "sb2fm_qc")
# --- Motion correction of time series to SBRef (with distortions)
ts2sb = Node(fsl.MCFLIRT(save_mats=True, save_plots=True),
"ts2sb")
ts2sb_qc = Node(RealignmentReport(), "ts2sb_qc")
# --- Combined motion correction, unwarping, and template registration
# Combine pre-and post-warp linear transforms
combine_premats = MapNode(fsl.ConvertXFM(concat_xfm=True),
"in_file", "combine_premats")
combine_postmats = Node(fsl.ConvertXFM(concat_xfm=True),
"combine_postmats")
# Transform Jacobian images into the template space
transform_jacobian = Node(fsl.ApplyWarp(relwarp=True),
"transform_jacobian")
# Apply rigid transforms and nonlinear warpfield to time series frames
restore_timeseries = MapNode(fsl.ApplyWarp(interp="spline", relwarp=True),
["in_file", "premat"],
"restore_timeseries")
# Apply rigid transforms and nonlinear warpfield to template frames
restore_template = MapNode(fsl.ApplyWarp(interp="spline", relwarp=True),
["in_file", "premat", "field_file"],
"restore_template")
# Perform final preprocessing operations on timeseries
finalize_timeseries = Node(FinalizeTimeseries(experiment=experiment),
"finalize_timeseries")
# Perform final preprocessing operations on template
finalize_template = JoinNode(FinalizeTemplate(experiment=experiment),
name="finalize_template",
joinsource="run_source",
joinfield=["mean_files", "tsnr_files",
"mask_files", "noise_files"])
# --- Workflow ouptut
save_info = Node(SaveInfo(info_dict=info.trait_get()), "save_info")
template_output = Node(DataSink(base_directory=info.proc_dir,
parameterization=False),
"template_output")
timeseries_output = Node(DataSink(base_directory=info.proc_dir,
parameterization=False),
"timeseries_output")
# === Assemble pipeline
cache_base = op.join(info.cache_dir, info.experiment_name)
workflow = Workflow(name="preproc", base_dir=cache_base)
# Connect processing nodes
processing_edges = [
(subject_source, session_source,
[("subject", "subject")]),
(subject_source, run_source,
[("subject", "subject")]),
(session_source, run_source,
[("session", "session")]),
(session_source, session_input,
[("session", "session")]),
(run_source, run_input,
[("run", "run")]),
# Phase-encode distortion estimation
(session_input, estimate_distortions,
[("fm_file", "in_file"),
("phase_encoding", "encoding_direction"),
("readout_times", "readout_times")]),
(session_input, finalize_unwarping,
[("session_tuple", "session_tuple"),
("fm_file", "raw_file"),
("phase_encoding", "phase_encoding")]),
(estimate_distortions, finalize_unwarping,
[("out_corrected", "corrected_file"),
("out_warps", "warp_files"),
("out_jacs", "jacobian_files")]),
# Registration of corrected SE-EPI to anatomy
(session_input, fm2anat,
[("subject", "subject_id")]),
(finalize_unwarping, fm2anat,
[("corrected_file", "source_file")]),
# Registration of each frame to SBRef image
(run_input, ts2sb,
[("ts_file", "in_file"),
("sb_file", "ref_file")]),
(ts2sb, finalize_timeseries,
[("par_file", "mc_file")]),
# Registration of SBRef volume to SE-EPI fieldmap
(run_input, sb2fm,
[("sb_file", "in_file")]),
(finalize_unwarping, sb2fm,
[("raw_file", "reference"),
("mask_file", "ref_weight")]),
# Single-interpolation spatial realignment and unwarping
(ts2sb, combine_premats,
[("mat_file", "in_file")]),
(sb2fm, combine_premats,
[("out_matrix_file", "in_file2")]),
(fm2anat, combine_postmats,
[("out_fsl_file", "in_file")]),
(session_input, combine_postmats,
[("reg_file", "in_file2")]),
(run_input, transform_jacobian,
[("anat_file", "ref_file")]),
(finalize_unwarping, transform_jacobian,
[("jacobian_file", "in_file")]),
(combine_postmats, transform_jacobian,
[("out_file", "premat")]),
(run_input, restore_timeseries,
[("ts_frames", "in_file")]),
(run_input, restore_timeseries,
[("anat_file", "ref_file")]),
(combine_premats, restore_timeseries,
[("out_file", "premat")]),
(finalize_unwarping, restore_timeseries,
[("warp_file", "field_file")]),
(combine_postmats, restore_timeseries,
[("out_file", "postmat")]),
(run_input, finalize_timeseries,
[("run_tuple", "run_tuple"),
("anat_file", "anat_file"),
("seg_file", "seg_file"),
("mask_file", "mask_file")]),
(transform_jacobian, finalize_timeseries,
[("out_file", "jacobian_file")]),
(restore_timeseries, finalize_timeseries,
[("out_file", "in_files")]),
(session_input, restore_template,
[("fm_frames", "in_file"),
("anat_file", "ref_file")]),
(estimate_distortions, restore_template,
[("out_mats", "premat"),
("out_warps", "field_file")]),
(combine_postmats, restore_template,
[("out_file", "postmat")]),
(session_input, finalize_template,
[("session_tuple", "session_tuple"),
("seg_file", "seg_file"),
("anat_file", "anat_file")]),
(transform_jacobian, finalize_template,
[("out_file", "jacobian_file")]),
(restore_template, finalize_template,
[("out_file", "in_files")]),
(finalize_timeseries, finalize_template,
[("mean_file", "mean_files"),
("tsnr_file", "tsnr_files"),
("mask_file", "mask_files"),
("noise_file", "noise_files")]),
# --- Persistent data storage
# Ouputs associated with each scanner run
(finalize_timeseries, timeseries_output,
[("output_path", "container"),
("out_file", "@func"),
("mean_file", "@mean"),
("mask_file", "@mask"),
("tsnr_file", "@tsnr"),
("noise_file", "@noise"),
("mc_file", "@mc")]),
# Ouputs associated with the session template
(finalize_template, template_output,
[("output_path", "container"),
("out_file", "@func"),
("mean_file", "@mean"),
("tsnr_file", "@tsnr"),
("mask_file", "@mask"),
("noise_file", "@noise")]),
]
workflow.connect(processing_edges)
# Optionally connect QC nodes
qc_edges = [
# Registration of each frame to SBRef image
(run_input, ts2sb_qc,
[("run_tuple", "run_tuple")]),
(run_input, ts2sb_qc,
[("sb_file", "target_file")]),
(ts2sb, ts2sb_qc,
[("par_file", "realign_params")]),
# Registration of corrected SE-EPI to anatomy
(session_input, fm2anat_qc,
[("subject", "subject_id"),
("session_tuple", "session_tuple")]),
(fm2anat, fm2anat_qc,
[("registered_file", "in_file"),
("min_cost_file", "cost_file")]),
# Registration of SBRef volume to SE-EPI fieldmap
(run_input, sb2fm_qc,
[("run_tuple", "run_tuple")]),
(sb2fm, sb2fm_qc,
[("out_file", "in_file")]),
(finalize_unwarping, sb2fm_qc,
[("raw_file", "ref_file")]),
# Ouputs associated with each scanner run
(run_source, save_info,
[("run", "parameterization")]),
(save_info, timeseries_output,
[("info_file", "qc.@info_json")]),
(run_input, timeseries_output,
[("ts_plot", "qc.@raw_gif")]),
(sb2fm_qc, timeseries_output,
[("out_file", "qc.@sb2fm_gif")]),
(ts2sb_qc, timeseries_output,
[("params_plot", "qc.@params_plot"),
("target_plot", "qc.@target_plot")]),
(finalize_timeseries, timeseries_output,
[("out_gif", "qc.@ts_gif"),
("out_png", "qc.@ts_png"),
("mask_plot", "qc.@mask_plot"),
("mean_plot", "qc.@ts_mean_plot"),
("tsnr_plot", "qc.@ts_tsnr_plot"),
("noise_plot", "qc.@noise_plot")]),
# Outputs associated with the session template
(finalize_unwarping, template_output,
[("warp_plot", "qc.@warp_png"),
("unwarp_gif", "qc.@unwarp_gif")]),
(fm2anat_qc, template_output,
[("out_file", "qc.@reg_png")]),
(finalize_template, template_output,
[("out_plot", "qc.@func_png"),
("mean_plot", "qc.@mean"),
("tsnr_plot", "qc.@tsnr"),
("mask_plot", "qc.@mask"),
("noise_plot", "qc.@noise")]),
]
if qc:
workflow.connect(qc_edges)
return workflow
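# Hedged usage sketch: the lyman `info` object is assumed to come from the
# project's own configuration loading; the plugin settings are illustrative.
#
#   wf = define_preproc_workflow(info, subjects=["subj01"], sessions=None, qc=True)
#   wf.run("MultiProc", plugin_args={"n_procs": 4})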
# =========================================================================== #
# Custom processing code
# =========================================================================== #
def generate_iterables(scan_info, experiment, subjects, sessions=None):
"""Return lists of variables for preproc workflow iterables.
Parameters
----------
scan_info : nested dictionaries
A nested dictionary structure with the following key levels:
- subject ids
- session ids
- experiment names
Where the inner values are lists of run ids.
experiment : string
Name of the experiment to generate iterables for.
subjects : list of strings
List of subject ids to generate iterables for.
sessions : list of strings, optional
List of sessions to generate iterables for.
Returns
-------
subject_iterables: list of strings
A list of the subjects with runs for this experiment.
session_iterables : dict
A dictionary where keys are subject ids and values are lists of
(subject id, session id) pairs
run_iterables : dict
A dictionary where keys are (subject id, session id) pairs and values
lists of (subject id, session id, run id) pairs.
"""
subject_iterables = []
session_iterables = {}
run_iterables = {}
for subj in subjects:
subject_session_iterables = []
for sess in scan_info[subj]:
session_run_iterables = []
if sessions is not None and sess not in sessions:
continue
if experiment in scan_info[subj][sess]:
for run in scan_info[subj][sess][experiment]:
session_run_iterables.append((subj, sess, run))
if session_run_iterables:
sess_key = subj, sess
subject_session_iterables.append(sess_key)
run_iterables[sess_key] = session_run_iterables
if subject_session_iterables:
subject_iterables.append(subj)
session_iterables[subj] = subject_session_iterables
return subject_iterables, session_iterables, run_iterables
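# Small worked example of the structures this returns, assuming one subject with
# two sessions and the experiment acquired only in the first one:
#
#   scan_info = {"subj01": {"sess01": {"exp": ["run01", "run02"]},
#                           "sess02": {"other": ["run01"]}}}
#   generate_iterables(scan_info, "exp", ["subj01"])
#   # -> (["subj01"],
#   #     {"subj01": [("subj01", "sess01")]},
#   #     {("subj01", "sess01"): [("subj01", "sess01", "run01"),
#   #                             ("subj01", "sess01", "run02")]})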
# ---- Quality control mixins
class TimeSeriesGIF(object):
def write_time_series_gif(self, runtime, img, fname, title=None):
os.mkdir("png")
nx, ny, nz, nt = img.shape
delay = 10
width = 5
        height = width * max([nx, ny, nz]) / sum([nx, ny, nz])
top = 1
if title is not None:
pad = .25
top = height / (height + pad)
height += pad
f, axes = plt.subplots(ncols=3, figsize=(width, height))
for ax in axes:
ax.set_axis_off()
if title is not None:
f.text(.5, top + (1 - top) / 2, title,
ha="center", va="center", color="w", size=10)
data = img.get_fdata()
vmin, vmax = np.percentile(data, [2, 98])
kws = dict(vmin=vmin, vmax=vmax, cmap="gray")
im_x = axes[0].imshow(np.zeros((nz, ny)), **kws)
im_y = axes[1].imshow(np.zeros((nz, nx)), **kws)
im_z = axes[2].imshow(np.zeros((ny, nx)), **kws)
f.subplots_adjust(0, 0, 1, top, 0, 0)
x, y, z = nx // 2, ny // 2, nz // 2
text = f.text(0.02, 0.02, "",
size=10, ha="left", va="bottom",
color="w", backgroundcolor="0")
pngs = []
for t in range(nt):
vol = data[..., t]
im_x.set_data(np.rot90(vol[x, :, :]))
im_y.set_data(np.rot90(vol[:, y, :]))
im_z.set_data(np.rot90(vol[:, :, z]))
if not t % 10:
text.set_text("T: {:d}".format(t))
frame_png = "png/{:04d}.png".format(t)
f.savefig(frame_png, facecolor="0", edgecolor="0")
pngs.append(frame_png)
cmdline = ["convert",
"-loop", "0",
"-delay", str(delay),
"-limit", "thread", "1"]
cmdline.extend(pngs)
cmdline.append(fname)
self.submit_cmdline(runtime, cmdline)
# ---- Data input and pre-preprocessing
class SessionInput(LymanInterface):
class input_spec(TraitedSpec):
session = traits.Tuple(traits.Str(), traits.Str())
data_dir = traits.Directory(exists=True)
proc_dir = traits.Directory(exists=True)
fm_template = traits.Str()
phase_encoding = traits.Either("ap", "pa")
class output_spec(TraitedSpec):
session_tuple = traits.Tuple(traits.Str(), traits.Str())
subject = traits.Str()
session = traits.Str()
fm_file = traits.File(exists=True)
fm_frames = traits.List(traits.File(exists=True))
reg_file = traits.File(exists=True)
seg_file = traits.File(exists=True)
anat_file = traits.File(exists=True)
mask_file = traits.File(exists=True)
phase_encoding = traits.List(traits.Str())
readout_times = traits.List(traits.Float())
def _run_interface(self, runtime):
# Determine the execution parameters
subject, session = self.inputs.session
self._results["session_tuple"] = self.inputs.session
self._results["subject"] = str(subject)
self._results["session"] = str(session)
# Determine the phase encoding directions
pe = self.inputs.phase_encoding
if pe == "ap":
pos_pe, neg_pe = "ap", "pa"
elif pe == "pa":
pos_pe, neg_pe = "pa", "ap"
# Spec out full paths to the pair of fieldmap files
keys = dict(subject=subject, session=session)
template = self.inputs.fm_template
func_dir = op.join(self.inputs.data_dir, subject, "func")
pos_fname = op.join(func_dir,
template.format(encoding=pos_pe, **keys))
neg_fname = op.join(func_dir,
template.format(encoding=neg_pe, **keys))
# Load the two images in canonical orientation
pos_img = nib.as_closest_canonical(nib.load(pos_fname))
neg_img = nib.as_closest_canonical(nib.load(neg_fname))
affine, header = pos_img.affine, pos_img.header
# Concatenate the images into a single volume
pos_data = pos_img.get_fdata()
neg_data = neg_img.get_fdata()
data = np.concatenate([pos_data, neg_data], axis=-1)
assert len(data.shape) == 4
# Convert image datatype to float
header.set_data_dtype(np.float32)
# Write out a 4D file
fname = self.define_output("fm_file", "fieldmap.nii.gz")
img = nib.Nifti1Image(data, affine, header)
img.to_filename(fname)
# Write out a set of 3D files for each frame
fm_frames = []
frames = nib.four_to_three(img)
for i, frame in enumerate(frames):
fname = op.abspath("fieldmap_{:02d}.nii.gz".format(i))
fm_frames.append(fname)
frame.to_filename(fname)
self._results["fm_frames"] = fm_frames
# Define phase encoding and readout times for TOPUP
pe_dir = ["y"] * pos_img.shape[-1] + ["y-"] * neg_img.shape[-1]
readout_times = [1 for _ in pe_dir]
self._results["phase_encoding"] = pe_dir
self._results["readout_times"] = readout_times
# Load files from the template directory
template_path = op.join(self.inputs.proc_dir, subject, "template")
results = dict(
reg_file=op.join(template_path, "anat2func.mat"),
seg_file=op.join(template_path, "seg.nii.gz"),
anat_file=op.join(template_path, "anat.nii.gz"),
mask_file=op.join(template_path, "mask.nii.gz"),
)
self._results.update(results)
return runtime
class RunInput(LymanInterface, TimeSeriesGIF):
class input_spec(TraitedSpec):
run = traits.Tuple(traits.Str(), traits.Str(), traits.Str())
data_dir = traits.Directory(exists=True)
proc_dir = traits.Directory(exists=True)
experiment = traits.Str()
sb_template = traits.Str()
ts_template = traits.Str()
crop_frames = traits.Int(0, usedefault=True)
class output_spec(TraitedSpec):
run_tuple = traits.Tuple(traits.Str(), traits.Str(), traits.Str())
subject = traits.Str()
session = traits.Str()
run = traits.Str()
sb_file = traits.File(exists=True)
ts_file = traits.File(exists=True)
ts_frames = traits.List(traits.File(exists=True))
ts_plot = traits.File(exists=True)
reg_file = traits.File(exists=True)
seg_file = traits.File(exists=True)
anat_file = traits.File(exists=True)
mask_file = traits.File(exists=True)
output_path = traits.Directory()
def _run_interface(self, runtime):
# Determine the parameters
experiment = self.inputs.experiment
subject, session, run = self.inputs.run
self._results["run_tuple"] = self.inputs.run
self._results["subject"] = subject
self._results["session"] = session
self._results["run"] = run
# Spec out paths to the input files
keys = dict(subject=subject, experiment=experiment,
session=session, run=run)
sb_fname = op.join(self.inputs.data_dir, subject, "func",
self.inputs.sb_template.format(**keys))
ts_fname = op.join(self.inputs.data_dir, subject, "func",
self.inputs.ts_template.format(**keys))
# Load the input images in canonical orientation
sb_img = nib.as_closest_canonical(nib.load(sb_fname))
ts_img = nib.as_closest_canonical(nib.load(ts_fname))
# Convert image datatypes to float
sb_img.set_data_dtype(np.float32)
ts_img.set_data_dtype(np.float32)
# Optionally crop the first n frames of the timeseries
if self.inputs.crop_frames > 0:
ts_data = ts_img.get_fdata()
ts_data = ts_data[..., self.inputs.crop_frames:]
ts_img = nib.Nifti1Image(ts_data, ts_img.affine, ts_img.header)
# Write out the new images
self.write_image("sb_file", "sb.nii.gz", sb_img)
self.write_image("ts_file", "ts.nii.gz", ts_img)
# Write out each frame of the timeseries
os.mkdir("frames")
ts_frames = []
ts_frame_imgs = nib.four_to_three(ts_img)
for i, frame_img in enumerate(ts_frame_imgs):
frame_fname = op.abspath("frames/frame{:04d}.nii.gz".format(i))
ts_frames.append(frame_fname)
frame_img.to_filename(frame_fname)
self._results["ts_frames"] = ts_frames
# Make a GIF movie of the raw timeseries
qc_title = "{} {} {}".format(subject, session, run)
out_plot = self.define_output("ts_plot", "raw.gif")
self.write_time_series_gif(runtime, ts_img, out_plot, title=qc_title)
# Load files from the template directory
template_path = op.join(self.inputs.proc_dir, subject, "template")
results = dict(
reg_file=op.join(template_path, "anat2func.mat"),
seg_file=op.join(template_path, "seg.nii.gz"),
anat_file=op.join(template_path, "anat.nii.gz"),
mask_file=op.join(template_path, "mask.nii.gz"),
)
self._results.update(results)
return runtime
# --- Preprocessing operations
class CombineLinearTransforms(LymanInterface):
class input_spec(TraitedSpec):
ts2sb_file = traits.File(exists=True)
sb2fm_file = traits.File(exists=True)
        fm2anat_file = traits.File(exists=True)
anat2temp_file = traits.File(exists=True)
class output_spec(TraitedSpec):
ts2fm_file = traits.File(exists=True)
fm2temp_file = traits.File(exists=True)
def _run_interface(self, runtime):
# Combine the pre-warp transform
ts2sb_mat = np.loadtxt(self.inputs.ts2sb_file)
sb2fm_mat = np.loadtxt(self.inputs.sb2fm_file)
ts2fm_mat = np.dot(sb2fm_mat, ts2sb_mat)
ts2fm_file = self.define_output("ts2fm_file", "ts2fm.mat")
np.savetxt(ts2fm_file, ts2fm_mat, delimiter=" ")
# Combine the post-warp transform
fm2anat_mat = np.loadtxt(self.inputs.fm2anat_file)
anat2temp_mat = np.loadtxt(self.inputs.anat2temp_file)
fm2temp_mat = np.dot(anat2temp_mat, fm2anat_mat)
fm2temp_file = self.define_output("fm2temp_file", "fm2temp.mat")
np.savetxt(fm2temp_file, fm2temp_mat, delimiter=" ")
return runtime
class FinalizeUnwarping(LymanInterface):
class input_spec(TraitedSpec):
raw_file = traits.File(exists=True)
corrected_file = traits.File(exists=True)
warp_files = traits.List(traits.File(exists=True))
        jacobian_files = traits.List(traits.File(exists=True))
phase_encoding = traits.List(traits.Str)
session_tuple = traits.Tuple(traits.Str(), traits.Str())
class output_spec(TraitedSpec):
raw_file = traits.File(exists=True)
corrected_file = traits.File(exists=True)
warp_file = traits.File(exists=True)
mask_file = traits.File(exists=True)
        jacobian_file = traits.File(exists=True)
warp_plot = traits.File(exists=True)
unwarp_gif = traits.File(exists=True)
def _run_interface(self, runtime):
# Load the 4D raw fieldmap image and select first frame
raw_img_frames = nib.load(self.inputs.raw_file)
raw_img = nib.four_to_three(raw_img_frames)[0]
affine, header = raw_img.affine, raw_img.header
# Write out the raw image to serve as a registration target
self.write_image("raw_file", "raw.nii.gz", raw_img)
# Load the 4D jacobian image
jac_img_frames = nib.concat_images(self.inputs.jacobian_files)
jac_data = jac_img_frames.get_fdata()
# Load the 4D corrected fieldmap image
corr_img_frames = nib.load(self.inputs.corrected_file)
corr_data = corr_img_frames.get_fdata()
# Average the corrected image over the final dimension and write
corr_data = corr_data.mean(axis=-1)
self.write_image("corrected_file", "func.nii.gz",
corr_data, affine, header)
# Save the jacobian images using the raw geometry
self.write_image("jacobian_file", "jacobian.nii.gz",
jac_data, affine, header)
# Select the first warpfield image
# We combine the two fieldmap images so that the first one has
# a phase encoding that matches the time series data.
# Also note that when the fieldmap images have multiple frames,
# the warps corresponding to those frames are identical.
warp_file = self.inputs.warp_files[0]
# Load in the the warp file and save out with the correct affine
# (topup doesn't save the header geometry correctly for some reason)
warp_data = nib.load(warp_file).get_fdata()
self.write_image("warp_file", "warp.nii.gz", warp_data, affine, header)
# Select the warp along the phase encode direction
# Note: we elsewhere currently require phase encoding to be AP or PA
# so because the input node transforms the fieldmap to canonical
        # orientation this will work. But in the future we might want to be
# more flexible with what we accept and will need to change this.
warp_data_y = warp_data[..., 1]
# Write out a mask to exclude voxels with large distortions/dropout
mask_data = (np.abs(warp_data_y) < 4).astype(np.int)
self.write_image("mask_file", "warp_mask.nii.gz",
mask_data, affine, header)
# Get the metadata
qc_title = " ".join(self.inputs.session_tuple)
# Generate a QC image of the warpfield
m = Mosaic(raw_img, warp_data_y, title=qc_title)
m.plot_overlay("coolwarm", vmin=-6, vmax=6, alpha=.75)
self.write_visualization("warp_plot", "warp.png", m)
# Generate a QC gif of the unwarping performance
self.generate_unwarp_gif(runtime, raw_img_frames, corr_img_frames)
return runtime
def generate_unwarp_gif(self, runtime, raw_img, corrected_img):
# Load the input and output files
vol_data = dict(
orig=raw_img.get_fdata(),
corr=corrected_img.get_fdata(),
)
# Average over the frames that correspond to unique encoding directions
pe_data = dict(orig=[], corr=[])
pe = np.array(self.inputs.phase_encoding)
for enc in np.unique(pe):
enc_trs = pe == enc
for scan in ["orig", "corr"]:
enc_data = vol_data[scan][..., enc_trs].mean(axis=-1)
pe_data[scan].append(enc_data)
# Compute the spatial correlation within image pairs
r_vals = dict()
for scan, (scan_pos, scan_neg) in pe_data.items():
r_vals[scan] = np.corrcoef(scan_pos.flat, scan_neg.flat)[0, 1]
# Set up the figure parameters
nx, ny, nz, _ = vol_data["orig"].shape
x_slc = (np.linspace(.2, .8, 8) * nx).astype(np.int)
vmin, vmax = np.percentile(vol_data["orig"].flat, [2, 98])
kws = dict(vmin=vmin, vmax=vmax, cmap="gray")
text_kws = dict(size=7, color="w", backgroundcolor="0",
ha="left", va="bottom")
qc_title = " ".join(self.inputs.session_tuple)
width = len(x_slc)
height = (nz / ny) * 2.75
png_fnames = []
for i, enc in enumerate(["pos", "neg"]):
# Initialize the figure and axes
f = plt.figure(figsize=(width, height))
gs = dict(
orig=plt.GridSpec(
nrows=1, ncols=len(x_slc), figure=f,
left=0, bottom=.5, right=1, top=.94,
wspace=0, hspace=0,
),
corr=plt.GridSpec(
nrows=1, ncols=len(x_slc), figure=f,
left=0, bottom=0, right=1, top=.44,
wspace=0, hspace=0,
)
)
# Add text with the image pair correlation
f.text(.05, .93,
"Original similarity: {:.2f}".format(r_vals["orig"]),
**text_kws)
f.text(.05, .43,
"Corrected similarity: {:.2f}".format(r_vals["corr"]),
**text_kws)
# Add a title with the metadata
f.suptitle(qc_title, y=.99, size=10, color="w")
# Plot the image data and save the static figure
for scan in ["orig", "corr"]:
axes = [f.add_subplot(pos) for pos in gs[scan]]
vol = pe_data[scan][i]
for ax, x in zip(axes, x_slc):
slice = np.rot90(vol[x])
ax.imshow(slice, **kws)
ax.set_axis_off()
png_fname = "frame{}.png".format(i)
png_fnames.append(png_fname)
f.savefig(png_fname, facecolor="0", edgecolor="0")
plt.close(f)
# Combine frames into an animated gif
out_file = self.define_output("unwarp_gif", "unwarp.gif")
cmdline = ["convert",
"-loop", "0",
"-delay", "100",
"-limit", "thread", "1"]
cmdline.extend(png_fnames)
cmdline.append(out_file)
self.submit_cmdline(runtime, cmdline)
class FinalizeTimeseries(LymanInterface, TimeSeriesGIF):
class input_spec(TraitedSpec):
experiment = traits.Str()
run_tuple = traits.Tuple(traits.Str(), traits.Str(), traits.Str())
anat_file = traits.File(exists=True)
in_files = traits.List(traits.File(exists=True))
seg_file = traits.File(exists=True)
mask_file = traits.File(exists=True)
jacobian_file = traits.File(exists=True)
mc_file = traits.File(exists=True)
class output_spec(TraitedSpec):
out_file = traits.File(exists=True)
out_gif = traits.File(exists=True)
out_png = traits.File(exists=True)
mean_file = traits.File(exists=True)
mean_plot = traits.File(exists=True)
tsnr_file = traits.File(exists=True)
tsnr_plot = traits.File(exists=True)
mask_file = traits.File(exists=True)
mask_plot = traits.File(exists=True)
noise_file = traits.File(exists=True)
noise_plot = traits.File(exists=True)
mc_file = traits.File(exists=True)
output_path = traits.Directory()
def _run_interface(self, runtime):
# Concatenate timeseries frames into 4D image
        # TODO Note that the TR information is not propagated into the header
img = nib.concat_images(self.inputs.in_files)
affine, header = img.affine, img.header
data = img.get_fdata()
# Load the template brain mask image
mask_img = nib.load(self.inputs.mask_file)
mask = mask_img.get_fdata().astype(np.bool)
# Compute a run-specfic mask that excludes voxels outside the FOV
mask &= data.var(axis=-1) > 0
self.write_image("mask_file", "mask.nii.gz",
mask.astype(np.int), affine, header)
# Zero-out data outside the mask
data[~mask] = 0
# Jacobian modulate each frame of the timeseries image
jacobian_img = nib.load(self.inputs.jacobian_file)
jacobian = jacobian_img.get_fdata()[..., [0]]
data *= jacobian
# Scale the timeseries for cross-run intensity normalization
target = 100
scale_value = target / data[mask].mean()
data = data * scale_value
# Remove linear but not constant trend
data[mask] = signals.detrend(data[mask], axis=-1, replace_mean=True)
# Save out the final time series
out_img = self.write_image("out_file", "func.nii.gz",
data, affine, header)
# Generate the temporal mean and SNR images
mean = data.mean(axis=-1)
sd = data.std(axis=-1)
mask &= sd > 0
with np.errstate(all="ignore"):
tsnr = mean / sd
tsnr[~mask] = 0
self.write_image("mean_file", "mean.nii.gz", mean, affine, header)
self.write_image("tsnr_file", "tsnr.nii.gz", tsnr, affine, header)
# Load the template anatomical image
anat_img = nib.load(self.inputs.anat_file)
# Load the template segmentation image
seg_img = nib.load(self.inputs.seg_file)
seg = seg_img.get_fdata()
# Identify unusually noisy voxels
gray_mask = (0 < seg) & (seg < 5)
gray_img = nib.Nifti1Image(gray_mask, img.affine, img.header)
noise_img = signals.identify_noisy_voxels(
out_img, gray_img, neighborhood=5, threshold=1.5, detrend=False
)
self.write_image("noise_file", "noise.nii.gz", noise_img)
# Load the motion correction params and convert to CSV with header
mc_file = self.define_output("mc_file", "mc.csv")
mc_data = np.loadtxt(self.inputs.mc_file)
cols = ["rot_x", "rot_y", "rot_z", "trans_x", "trans_y", "trans_z"]
        mc_data = pd.DataFrame(mc_data, columns=cols)
import unittest
import xmlrunner
import pandas as pd
import numpy as np
class DummyUnitTest(unittest.TestCase):
def setUp(self):
self.int = 5
self.yes = True
self.no = False
self.float = 0.5
self.pi = 3.141592653589793238462643383279
self.string = "Miguel"
self.none = None
self.list = [1, 2, 3]
self.dict = {"a": 1, "b": 2}
self.np_array = np.array(self.list)
        self.df = pd.DataFrame(self.dict, index=[0])
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
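# Worked example: with Nc_old = 1.0, Nc_new = 0.2, l = 5 and
# t_start = 2020-03-15, evaluating ramp_fun two days later (2020-03-17) gives
# 1.0 + (0.2 - 1.0)/5 * 2 = 0.68; callers are expected to switch to Nc_new once
# t - t_start exceeds l days, since the linear expression keeps decreasing.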
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps). This
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = self.default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
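# Hedged usage sketch: load the interim Proximus matrices once, build the cached
# time-dependent function and evaluate it for a date (the aggregation level and
# date below are illustrative).
#
#   all_mob, avg_mob = load_all_mobility_data("arr", dtype="fractional")
#   mobility_func = make_mobility_update_function(all_mob, avg_mob)
#   place = mobility_func(pd.Timestamp("2020-11-02"))  # square mobility matrix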
###################
## VOC functions ##
###################
class make_VOC_function():
"""
    Class that returns a time-dependent parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
    Current implementation includes the alpha - delta strains.
    If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
    A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
*df_abc: pd.dataFrame (optional)
        Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
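# Sketch of the default behaviour: up to 2021-05-01 the returned fractions are
# [1 - f_abc(t), f_abc(t), 0], with f_abc(t) = 1/(1 + exp(-0.07*(t - 2021-02-14)))
# (or taken from the data if provided); afterwards they are
# [0, 1 - f_delta(t), f_delta(t)] with f_delta centred on 2021-06-25 (k = 0.11).
# For example, exactly on 2021-06-25 f_delta = 0.5, so the call returns
# approximately [0, 0.5, 0.5].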
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
    Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first-dose data by Sciensano are used. Beyond the end of the available data, a hypothetical scheme is used. If spatial data are given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
                raise Exception(f"Space is {self.space_agg}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of consired spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
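    # Worked example of the redistribution rule (numbers are illustrative): if the
    # source data report 1000 doses for a 10-19 bracket and the demographics say
    # that 12-17 year olds make up 60% of that bracket's population, then roughly
    # 600 of those doses end up in the model's (12, 18) class; every desired class
    # sums the demographically weighted share of each overlapping source age.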
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
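    # Illustrative allocation (hedged, numbers made up): with daily_doses = 1000
    # and vacc_order running from old to young, the oldest group is served first;
    # if only 400 of its members remain eligible once refusers are subtracted,
    # those 400 doses are assigned there and the remaining 600 spill over to the
    # next group in vacc_order, until the doses run out or stop_idx is reached.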
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" doses per day,
administered in the order specified by the vector "vacc_order", with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccine doses; individuals are transferred to the vaccination circuit after some time delay following their first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_doses : int
Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : int
Index in "vacc_order" at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups; an index of 8 corresponds to not vaccinating the age group vacc_order[8].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for the non-spatial multi-dose vaccination model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
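# --- Illustrative usage sketch (added; not part of the original source) ---
# Assuming `vacc_strategy` is an instance of this time-dependent parameter class and
# `states`, `param` and `initN` come from the epidemiological model, it could be
# evaluated for a given simulation date as follows (all names here are assumptions):
#
#   N_vacc = vacc_strategy(pd.Timestamp('2021-08-01'), states, param, initN,
#                          daily_doses=50000, refusal=[0.3]*9)
#
# `N_vacc` then holds the number of individuals to vaccinate per age group (or per
# [patch, age] for the spatial variant), as described in the docstring above.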
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and end dates of the dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function has been evaluated for a set of parameters, the result is cached and doesn't need to be recomputed
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
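# --- Illustrative usage sketch (added; names are assumptions) ---
# Assuming `policy` is an instance of this class, two days into a five-day ramp that
# starts at `t_start`, the contact matrix is interpolated linearly between Nc_old and
# Nc_new, i.e. Nc_old + (2/5)*(Nc_new - Nc_old):
#
#   Nc_t = policy.ramp_fun(Nc_old, Nc_new, t_start + pd.Timedelta('2D'), t_start, l=5)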
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
t25 = pd.Timestamp('2021-12-26') # Start of Christmas break
t26 = pd.Timestamp('2022-01-06') # End of Christmas break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = | pd.Timestamp('2022-09-21') | pandas.Timestamp |
import os
from sklearn import metrics
from . import data as D
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from . import plotting
SURROGATES = 'surrogates iaaft'.split() + [None]
TESTSET_VALS = (False, True)
LABELS = 'Wake S1 S2 S3 S4 REM'.split()
REDUCED_LABELS = 'Wake Light Deep REM'.split()
_REDUCTION = dict([
('Wake', 'Wake'),
('S1', 'Light'),
('S2', 'Light'),
('S3', 'Deep'),
('S4', 'Deep'),
('REM', 'REM')
])
_REDUCTION = {
LABELS.index(full): REDUCED_LABELS.index(reduced)
for full, reduced in _REDUCTION.items()
}
reduce_stages = np.vectorize(_REDUCTION.get)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plotting.colorscheme['accuracy']):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, None]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, '{}%'.format(int(100.0*cm[i, j])),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black", fontsize=12)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return
def plot_mats(df, title_prefix=None):
title = 'Training set'
if title_prefix is not None:
title = title_prefix+', '+title
DF = df[df.testset == False]
plt.figure(figsize=(15, 6))
plt.subplot(121)
cm = metrics.confusion_matrix(DF.truth, DF.prediction)
plot_confusion_matrix(cm, LABELS, normalize=True, title=title)
plt.clim(0, 1)
DF = df[df.testset == True]
plt.subplot(122)
cm = metrics.confusion_matrix(DF.truth, DF.prediction)
plot_confusion_matrix(cm, LABELS, normalize=True, title='Test set')
plt.clim(0, 1)
plt.tight_layout()
return
def plot_mats_reduced(df, title_prefix=None):
title = 'Training set'
if title_prefix is not None:
title = title_prefix+', '+title
DF = df[df.testset == False]
plt.figure(figsize=(15, 6))
plt.subplot(121)
cm = metrics.confusion_matrix(reduce_stages(DF.truth), reduce_stages(DF.prediction))
plot_confusion_matrix(cm, REDUCED_LABELS, normalize=True, title=title)
plt.clim(0, 1)
DF = df[df.testset == True]
plt.subplot(122)
cm = metrics.confusion_matrix(reduce_stages(DF.truth), reduce_stages(DF.prediction))
plot_confusion_matrix(cm, REDUCED_LABELS, normalize=True, title='Test set')
plt.clim(0, 1)
plt.tight_layout()
return
def f1_score(df, average='macro'):
return metrics.f1_score(df.truth, df.prediction, average=average)
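# --- Illustrative usage (added; not from the original source) ---
# Given a predictions dataframe with `truth` and `prediction` columns:
#
#   df = pd.DataFrame({'truth': [0, 1, 5], 'prediction': [0, 2, 5]})
#   f1_score(df)                    # macro-averaged F1 over the sleep stages
#   f1_score(df, average='micro')   # micro-averaged variant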
class _Crossval:
def __init__(self, data=None):
self._data = data
self._accuracy = None
self._reduced_accuracy = None
self._total_accuracy = None
@property
def data(self):
return self._data
@property
def accuracy(self):
if self._accuracy is None:
df = self.data
assert sum(df.pid=='brux1') == 0
mindex = pd.MultiIndex.from_product([D.age_group_bins, TESTSET_VALS, LABELS, LABELS])
mindex.names = ['age_group', 'testset', 'groundtruth', 'prediction']
acc = pd.Series(data=None, index=mindex)
for Bin in D.age_group_bins:
for testset in TESTSET_VALS:
mask = (df.testset == testset) & (df.age_group == Bin)
dfm = df[mask]
cm = metrics.confusion_matrix(dfm.truth, dfm.prediction)
cm = cm.astype(np.float64) / cm.sum(axis=1)[:, None]
for true_label, cm_l in zip(LABELS, cm):
for pred_label, cm_lp in zip(LABELS, cm_l):
acc[(Bin, testset, true_label, pred_label)] = cm_lp
self._accuracy = acc
return self._accuracy
@property
def reduced_accuracy(self):
if self._reduced_accuracy is None:
df = self.data
assert sum(df.pid=='brux1') == 0
mindex = pd.MultiIndex.from_product(
[D.age_group_bins, TESTSET_VALS, REDUCED_LABELS, REDUCED_LABELS])
mindex.names = ['age_group', 'testset', 'groundtruth', 'prediction']
acc = | pd.Series(data=None, index=mindex) | pandas.Series |
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as stats
import numpy as np
import seaborn as sns
import os,sys
import itertools
import datetime
import scipy.signal as signal
import matplotlib.dates as mdates
from sklearn.neighbors._kde import KernelDensity
from sklearn.model_selection import GridSearchCV, LeaveOneOut
## if you want to use optimization for kde, uncomment the following two lines.
# from sklearn.grid_search import GridSearchCV
# from sklearn.cross_validation import LeaveOneOut
def get_meet_sec(df_meet):
"""
return a df with a datetime index rounded to second level.
"""
df_meet_sec = df_meet.copy()
df_meet_sec.index = df_meet_sec.index.map(lambda x: x.replace(microsecond=0))
return df_meet_sec
# return: the datetime index rounded to the second
def get_meet_flt(df_meet, window=8):
"""
data processing with median filter.
"""
df_flt = pd.rolling_median(df_meet, window=window)
return df_flt
# return: data smoothed with a median filter
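# NOTE (added): pd.rolling_median was deprecated in pandas 0.18 and removed in 0.23;
# on modern pandas the equivalent call would be df_meet.rolling(window=window).median().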
def get_df_cor(df_meeting, sel_users):
"""
return df of correlation of two selected users.
"""
df_meeting_sel = df_meeting[sel_users]
df_sel_sec = get_meet_sec(df_meeting_sel)
ts = []
cors = []
for i in df_sel_sec.index.unique():
frame = df_sel_sec.loc[i]
dfa, dfb = frame[sel_users[0]], frame[sel_users[1]]
p = stats.pearsonr(dfa, dfb)[0]
if not np.isnan(p):
ts.append(i)
cors.append(p)
df_cor = pd.DataFrame(np.vstack([ts, cors]).T, columns=['time', 'cor'])
df_cor.set_index('time', inplace=True)
return df_cor
# return: a dataframe of the correlation between the two selected users
def get_kde_pdf(X, bandwidth=2, step=.1, num_samples=200, optimize=False):
"""
return kde and pdf from a data sample
"""
if len(X) ==0 :
return [],np.array([]),[]
if optimize:
bandwidths = 10 ** np.linspace(-1, 1, 10)
grid = GridSearchCV(KernelDensity(kernel='gaussian'), {'bandwidth': bandwidths},
cv=LeaveOneOut(len(X)))
grid.fit(X[:, None]);
kde = KernelDensity(kernel='gaussian', bandwidth=grid.best_params_['bandwidth']).fit(X[:,None])
else:
kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(X[:,None])
pdf = np.exp( kde.score_samples(np.arange(0, 100, step)[:,None]) )
samples = kde.sample(num_samples)
return kde, np.array(pdf), samples
# kde and pdf estimated from a data sample
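# --- Illustrative usage (added; values are assumptions) ---
# Fit a Gaussian KDE on a 1-D array of volume samples:
#
#   kde, pdf, samples = get_kde_pdf(np.array([10., 12., 15., 40., 42.]), bandwidth=2)
#
# `pdf` is evaluated on the 0-100 volume grid with the given `step` (1000 points for
# step=0.1) and `samples` holds `num_samples` draws from the fitted density.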
def get_seps(dt_nys, prox=0.001, step=0.1, num_samples=200, bandwidth=2):
"""
return cut-off points for all users
"""
seps = []
prox = 0.01
for idx, user in enumerate(dt_nys):
ns, ys = dt_nys[user]
cond_nonezero = len(ns)==0 or len(ys)==0
kden, pns, nss = get_kde_pdf(ns, bandwidth, step, num_samples)
kdey, pys, yss = get_kde_pdf(ys, bandwidth, step, num_samples)
pys[pys<=prox] = 0
pns[pns<=prox] = 0
sep = -1
if not cond_nonezero:
for i in np.arange(int(100/step)-1, 0, -1):
if pys[i-1] < pns[i-1] and pys[i] >= pns[i]:
sep = i * step
break
seps.append(sep)
seps = np.array(seps)
seps[seps == -1] = seps[seps != -1].mean()
return seps
# return: cut-off points for all users
def get_kldistance(dt_nys, bandwidth=2, prox=0.001, step=0.1, num_samples=200, plot=False, figsize=(12,8)):
"""
only for 4-user situations
calculate kl-distance of two distributions (D_t and D_s))
"""
klds, seps = [], []
if plot is True:
fig, axs = plt.subplots(2,2,figsize=figsize,)
plt.tight_layout(h_pad=4)
for idx, user in enumerate(dt_nys):
ns, ys = dt_nys[user]
cond_nonezero = len(ns) == 0 or len(ys) ==0
kden, pns, nss = get_kde_pdf(ns, step=step, num_samples=num_samples, bandwidth=bandwidth)
kdey, pys, yss = get_kde_pdf(ys, step=step, num_samples=num_samples, bandwidth=bandwidth)
kldistance = stats.entropy(pns, pys) if not cond_nonezero else np.nan
if not np.isinf(kldistance) and not np.isnan(kldistance):
klds.append(kldistance)
pys[pys<=prox] = 0
pns[pns<=prox] = 0
sep = -1
if not cond_nonezero:
for i in np.arange(int(100/step)-1, 0, -1):
if pys[i-1] < pns[i-1] and pys[i] >= pns[i]:
sep = i * step
break
seps.append(sep)
if plot is True:
ax = axs.flatten()[idx]
sns.distplot(nss, label='Silent', kde=False, norm_hist=True, ax=ax)
sns.distplot(yss, label='Talking', kde=False, norm_hist=True, ax=ax)
ax.set_title('%s kl-dist:%.2f' % (user, kldistance) )
ax.set_xlabel('')
if not cond_nonezero:
ax.axvline(x=sep)
ax.annotate('best sep val: %.1f' % sep, xy=(sep, 0.1), xytext=(sep+5, 0.1),
arrowprops= dict(facecolor='black', shrink=0.0001))
ax.legend()
seps = np.array(seps)
seps[seps == -1] = seps[seps != -1].mean()
return klds, seps
# only applies to 4-user situations
# computes the KL distance between the two distributions (D_t and D_s)
def get_ts_distribution(df_meet, df_spk):
"""
get distributions for all subjects when they talk or keep silent
"""
dt_nys = {}
for user in df_meet.columns:
ns = df_spk.loc[df_spk.speaker != user][user]
ys = df_spk.loc[df_spk.speaker == user][user]
dt_nys[user] = [ns, ys]
return dt_nys
# get the distributions for all subjects when they talk or stay silent
def get_spk_genuine(df_meet, thre):
"""
Identify "genuine" speaker seconds: seconds in which one participant has both the
highest mean volume and a volume correlation >= thre with every other participant.
"""
df_meet_sec = get_meet_sec(df_meet)
df_cor = df_meet_sec.groupby(df_meet_sec.index).corr().dropna()
df_cor = pd.DataFrame((df_cor >= thre).T.all())
df_cor.reset_index(inplace=True)
df_cor.columns = ['datetime', 'member', 'val']
## Find those people whose correlations with all others are higher than thre
df_cor = df_cor.pivot(index='datetime', columns='member', values='val')
df_mean_ori = df_meet_sec.groupby(df_meet_sec.index).agg(np.mean)
df_std_ori = df_meet_sec.groupby(df_meet_sec.index).agg(np.std)
df_mean = pd.DataFrame(df_mean_ori.T.idxmax(), columns=['speaker'])
## combine 'correlation' and 'volume' to locate the speaker
df_comb = df_mean.merge(df_cor, left_index=True, right_index=True)
## df_comb_sel contains the speaker information
idx = [df_comb.iloc[i, u] for i,u in enumerate(df_comb.speaker)]
df_comb_sel = df_comb[idx][['speaker']]
## get speakers' mean
df_spk_mean = df_comb_sel.merge(df_mean_ori, left_index=True, right_index=True)
## get their std
df_spk_std = df_comb_sel.merge(df_std_ori, left_index=True, right_index=True)
return df_spk_mean, df_spk_std
# get the genuine speakers
def get_spk_all(df_flt, df_spk_mean, df_spk_std, bandwidth=2):
"""
Label all speaking seconds: threshold each participant's per-second mean and std
volume at the cut-off values learned from the genuine-speaker distributions.
"""
df_flt_sec = get_meet_sec(df_flt)
gps_mean = df_flt_sec.groupby(df_flt_sec.index).mean()
gps_std = df_flt_sec.groupby(df_flt_sec.index).std()
speak_all = []
nys_mean = get_ts_distribution(df_flt, df_spk_mean)
nys_std = get_ts_distribution(df_flt, df_spk_std)
seps_mean = get_seps(nys_mean, bandwidth=bandwidth)
seps_std = get_seps(nys_std, bandwidth=bandwidth)
for i, k in enumerate(df_flt.columns):
volume_mean = seps_mean[i]
volume_std = seps_std[i]
# print '[get_speak_all] user sep_val:', k, volume_mean, volume_std
df_std = pd.DataFrame(gps_std[gps_std[k] >= volume_std])
df_mean = pd.DataFrame(gps_mean[gps_mean[k] >= volume_mean])
df_mean_add = | pd.DataFrame(gps_mean.loc[df_std.index]) | pandas.DataFrame |
import itertools
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from pytorch_toolbelt.utils.fs import id_from_fname
from pytorch_toolbelt.utils.torch_utils import to_numpy
from sklearn.metrics import cohen_kappa_score
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from retinopathy.dataset import get_datasets, get_class_names
from retinopathy.factory import get_model
from retinopathy.inference import run_model_inference_via_dataset, \
reg_predictions_to_submission
def plot_confusion_matrix(cm, class_names,
figsize=(16, 16),
normalize=False,
title='Confusion matrix',
fname=None,
noshow=False):
"""Render the confusion matrix and return matplotlib's figure with it.
Normalization can be applied by setting `normalize=True`.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
cmap = plt.cm.Oranges
if normalize:
cm = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
f = plt.figure(figsize=figsize)
plt.title(title)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45, ha='right')
# f.tick_params(direction='inout')
# f.set_xticklabels(varLabels, rotation=45, ha='right')
# f.set_yticklabels(varLabels, rotation=45, va='top')
plt.yticks(tick_marks, class_names)
fmt = '.2f'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
if fname is not None:
plt.savefig(fname=fname)
if not noshow:
plt.show()
return f
def evaluate_generalization(checkpoints, fold=None, num_folds=4):
num_datasets = len(checkpoints)
kappa_matrix = np.zeros((num_datasets, num_datasets), dtype=np.float32)
class_names = list(checkpoints.keys())
checkpoint_files = list(checkpoints[name] for name in class_names)
for i, dataset_name in enumerate(class_names):
_, valid_ds, _ = get_datasets(use_aptos2015=dataset_name == 'aptos2015',
use_aptos2019=dataset_name == 'aptos2019',
use_messidor=dataset_name == 'messidor',
use_idrid=dataset_name == 'idrid',
fold=fold,
folds=num_folds)
for j, checkpoint_file in enumerate(checkpoint_files):
print('Evaluating', dataset_name, 'on', checkpoint_file)
p = run_model_inference_via_dataset(model_checkpoint=checkpoint_file,
dataset=valid_ds,
batch_size=32 * 3,
apply_softmax=False)
diagnosis = reg_predictions_to_submission(p)['diagnosis'].values
score = cohen_kappa_score(diagnosis, valid_ds.targets, weights='quadratic')
kappa_matrix[i, j] = score
print(kappa_matrix)
np.save('kappa_matrix', kappa_matrix)
# kappa_matrix = np.array([
# [0.6204755, 0.508746, 0.47336853, 0.5163422],
# [0.80155796, 0.92287904, 0.7245792, 0.80202734],
# [0.7953327, 0.77868223, 0.8940796, 0.7031926],
# [0.7898711, 0.6854141, 0.6820601, 0.92435944]])
plot_confusion_matrix(kappa_matrix, normalize=False, fname='kappa_matrix.png', class_names=class_names)
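# NOTE (added): the @torch.no_grad() function below re-uses the name
# `evaluate_generalization`, so at import time it shadows the fold-based variant above.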
@torch.no_grad()
def evaluate_generalization(checkpoints, num_folds=4):
num_datasets = len(checkpoints)
# kappa_matrix = np.zeros((num_datasets, num_datasets), dtype=np.float32)
class_names = list(checkpoints.keys())
# results = {}
for dataset_trained_on, checkpoints_per_fold in checkpoints.items():
# For each dataset trained on
for fold_trained_on, checkpoint_file in enumerate(checkpoints_per_fold):
# For each checkpoint
if checkpoint_file is None:
continue
# Load model
checkpoint = torch.load(checkpoint_file)
model_name = checkpoint['checkpoint_data']['cmd_args']['model']
batch_size = 16 # checkpoint['checkpoint_data']['cmd_args']['batch_size']
num_classes = len(get_class_names())
model = get_model(model_name, pretrained=False, num_classes=num_classes)
model.load_state_dict(checkpoint['model_state_dict'])
model = model.eval().cuda()
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
for dataset_index, dataset_validate_on in enumerate(class_names):
# For each available dataset
for fold_validate_on in range(num_folds):
_, valid_ds, _ = get_datasets(use_aptos2015=dataset_validate_on == 'aptos2015',
use_aptos2019=dataset_validate_on == 'aptos2019',
use_messidor=dataset_validate_on == 'messidor',
use_idrid=dataset_validate_on == 'idrid',
fold=fold_validate_on,
folds=num_folds)
data_loader = DataLoader(valid_ds, batch_size * torch.cuda.device_count(),
pin_memory=True,
num_workers=8)
predictions = defaultdict(list)
for batch in tqdm(data_loader,
desc=f'Evaluating {dataset_validate_on} fold {fold_validate_on} on {checkpoint_file}'):
input = batch['image'].cuda(non_blocking=True)
outputs = model(input)
logits = to_numpy(outputs['logits'].softmax(dim=1))
regression = to_numpy(outputs['regression'])
features = to_numpy(outputs['features'])
predictions['image_id'].extend(batch['image_id'])
predictions['diagnosis_true'].extend(to_numpy(batch['targets']))
predictions['logits'].extend(logits)
predictions['regression'].extend(regression)
predictions['features'].extend(features)
pickle_name = id_from_fname(
checkpoint_file) + f'_on_{dataset_validate_on}_fold{fold_validate_on}.pkl'
df = | pd.DataFrame.from_dict(predictions) | pandas.DataFrame.from_dict |
from .test_dataset import CMD_CREATE_TEST_TABLE
import pytest
import pandas as pd
import numpy as np
import os
from ..dataset import sql_dataset
from .gen_rand_data import rand_df
CMD_DROP_TEST_TABLE_IF_EXISTS = "IF OBJECT_ID('test_table', 'U') IS NOT NULL DROP TABLE test_table;"
CMD_CREATE_TRUNCATED_TEST_TABLE = """
CREATE TABLE test_table (
[dt] datetime NULL,
[uid] nvarchar(10) NULL,
[name] nvarchar(10) NULL,
[empty_col] nvarchar(100) NULL,
[float] decimal(22,3) NULL,
[float_k] decimal(22,3) NULL,
[float_m] decimal(22,13) NULL,
[float_b] decimal(22,9) NULL,
[float_na] decimal(22,3) NULL,
[bit] bit NULL,
[bit_na] bit NULL,
[tinyint] tinyint NULL,
[tinyint_na] tinyint NULL,
[smallint] smallint NULL,
[smallint_na] smallint NULL,
[int] int NULL,
[int_na] int NULL,
[bigint] bigint NULL,
[bigint_na] bigint NULL,
[bool] bit NULL,
[bool_na] bit NULL,
[empty_str_col] nvarchar(100) NULL
);
"""
def cleanup_test_data_csv():
try:
os.remove('./tests/test_data.csv')
except:
pass
def cleanup_test_data_copy_csv():
try:
os.remove('./tests/test_data_copy.csv')
except:
pass
@pytest.fixture(scope='session')
def gen_test_csv(request):
df = rand_df(100000)
df.to_csv('./tests/test_data.csv', encoding='utf-8-sig', index=False)
request.addfinalizer(cleanup_test_data_csv)
def test_read_upload_query_bcp(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/database.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
sd.upload(mode='overwrite_table', bcp=True, verbose=verbose)
df_queried = sd.query().data
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.upload(mode='overwrite_data', bcp=True, verbose=verbose)
df_queried = sd.query().data
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
def test_read_upload_query_bcp_truncate(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/database.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
# create a table too short to test upload(truncate=True/False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
sd.send_cmd(CMD_CREATE_TRUNCATED_TEST_TABLE)
with pytest.raises(ValueError):
# should raise errors because it won't fit
sd.upload(bcp=True, truncate=False, verbose=verbose, mode='overwrite_data')
sd.upload(bcp=True, truncate=True, verbose=verbose, mode='overwrite_data')
df_queried = sd.query().data
# truncate df_orig accordingly for equality assertion
df_orig['uid'] = df_orig['uid'].str[:10]
df_orig['name'] = df_orig['name'].str[:10]
df_orig['float'] = df_orig['float'].round(3)
df_orig['float_k'] = df_orig['float_k'].round(3)
df_orig['float_na'] = df_orig['float_na'].round(3)
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
def test_read_upload_query_pyodbc(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/database.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
sd.upload(mode='overwrite_table', bcp=False, verbose=verbose)
df_queried = sd.query().data
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.upload(mode='overwrite_data', bcp=False, verbose=verbose)
df_queried = sd.query().data
pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
def test_read_upload_query_pyodbc_truncate(gen_test_csv, verbose=True):
sd = sql_dataset('./tests/config/integration/database.yml').read()
sd.data['dt'] = pd.to_datetime(sd.data['dt'])
df_orig = sd.data.copy()
# create a table too short to test upload(truncate=True/False)
sd.send_cmd(CMD_DROP_TEST_TABLE_IF_EXISTS)
sd.send_cmd(CMD_CREATE_TRUNCATED_TEST_TABLE)
with pytest.raises(ValueError):
# should raise errors because it won't fit
sd.upload(bcp=False, truncate=False, verbose=verbose, mode='overwrite_data')
sd.upload(bcp=False, truncate=True, verbose=verbose, mode='overwrite_data')
df_queried = sd.query().data
# truncate df_orig accordingly for equality assertion
df_orig['uid'] = df_orig['uid'].str[:10]
df_orig['name'] = df_orig['name'].str[:10]
df_orig['float'] = df_orig['float'].round(3)
df_orig['float_k'] = df_orig['float_k'].round(3)
df_orig['float_na'] = df_orig['float_na'].round(3)
| pd.testing.assert_frame_equal(df_queried, df_orig, check_dtype=False, check_names=False) | pandas.testing.assert_frame_equal |
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
import statistics
import math
from sklearn.decomposition import PCA
COLUMNS_TO_USE = ["Home_type", "rooms", "home_size_m2", "lotsize_m2",
"expenses_dkk", "floor_as_int", "balcony", "zipcodes",
"m2_price", "age", "zipcode_avg_m2_price", "list_price_dkk"]
# remove 'Hometype_Andelsbolig' ?
UNUSED_VARIABLES = ['ID', 'home_url_boliga', 'home_url_realtor',
'street_name_number', 'zip_code_town', 'description_of_home', 'floor']
def build_features(df, zip_threshold=0.01):
"""
Wrapper for the methods that engineer new features
zip_threshold (float) :
    If a zip code accounts for fewer observations than
the threshold (0.01 = 1%) it will be grouped with another zip code in the
neighborhood
"""
df = add_balcony_variable(df)
df = make_floor_int(df)
df = add_zip_code_variable(df, zip_threshold)
df = make_m2_price(df)
df = add_neighboorhood_avg_m2_price(df)
df = add_age(df)
df = df.loc[df['age'] < 450]
df = remove_nans(df)
df = onehot_encode_variables(df)
df = remove_unused_variables(df)
df.drop(
["retrieved_on"], axis=1, inplace=True
)
return df
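# --- Illustrative usage (added; the file name is an assumption) ---
#   raw = pd.read_csv('copenhagen_homes.csv')
#   model_df = build_features(raw, zip_threshold=0.01)
# The returned frame holds the engineered columns (balcony, floor_as_int, zipcodes,
# m2_price, zipcode_avg_m2_price, age), one-hot encoded and with NaNs imputed or removed.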
def remove_coorporative_homes(df):
num_datapoints = len(df)
index = df[df["Home_type"] == "Andelsbolig"].index
df.drop(index, inplace=True)
print("{} cooperative dwellings removed".format(num_datapoints-len(df)))
df = df.reset_index(drop=True)
return df
def add_balcony_variable(df):
"""
This function creates a variable "balcony"
The function adds a column of which each element is an integer:
0 = no balcony, 1 = possibility of building balcony, 2 = balcony
It is an ordered variable, not categorical, as balcony is better,
therefore larger, than no balcony
"""
list_does_home_have_balcony = []
num_homes_with_no_description = 0
for description in df["description_of_home"]: #df.iloc[:, description_column_index]:
if type(description) != str:
list_does_home_have_balcony.append(0)
num_homes_with_no_description += 1
continue
# If the home does not have a balcony but there is an option of adding
# one, the realtor will typically write "mulighed for altan" or
# "altan projekt" (balcony project)
if "mulighed for altan" in description or "altanprojekt" in description or "altan projekt" in description:
list_does_home_have_balcony.append(1)
continue
if "altan" in description or "terrasse" in description or "tagterrasse" in description:
list_does_home_have_balcony.append(2)
continue
list_does_home_have_balcony.append(0)
df["balcony"] = list_does_home_have_balcony
print("{} homes had no description".format(num_homes_with_no_description))
return df
def make_floor_int(df):
floor_as_int = []
for i in range(len(df)):
# Only house types "villalejlighed" (flat in villa) and "ejerlejlighed" (flat)
# have floor numbers
if df["Home_type"][i] == "Villalejlighed" or df["Home_type"][i] == "Ejerlejlighed":
try:
floor_as_int.append(int(df["floor"][i][0]))
except:
median_value = int(round(statistics.median(floor_as_int)))
floor_as_int.append(median_value)
print("Error converting floor to int in line {}. Inserting median value: {}".format(i, median_value))
else:
floor_as_int.append(0)
df["floor_as_int"] = floor_as_int
return df #floor_as_int
def get_zips_to_be_grouped(df, threshold):
"""
Helper function for add_zip_code_variable()
If a zip code accounts for less than 1% (by default) of the datapoints and its area
has more than one zip code (e.g. Frederiksberg C), all such zip codes within that
area will be grouped into a single zip code
Parameters
----------
df : Pandas dataframe
Df with data
threshold : Float
If a zip code accounts for fewer than 'threshold' datapoints, it will
be grouped into one
Returns
-------
zips_to_be_grouped : SET
"""
zip_code_occurences = df.zip_code_town.value_counts()
zips_to_be_grouped = []
threshold = len(df) * threshold
print("Grouping zip codes with fewer than {} datapoints".format(threshold))
for i in range(len(zip_code_occurences)):
area = zip_code_occurences.index[i]
if zip_code_occurences[i] < threshold:
zips_to_be_grouped.append(area)
# using set() for higher look up speed
return set(zips_to_be_grouped)
def add_zip_code_variable(df, threshold=0.01):
"""
Some zip codes in Copenhagen cover very small areas whereas others cover
very large areas. The zip codes covering small areas are not well represented
in the dataset. Therefore, I group zip codes that have few datapoints
in groups that represent the area of Copenhagen the zip code belongs to.
E.g.
Parameters
----------
df : PANDAS DATAFRAME
df with data
threshold : FLOAT
If a zip code accounts for fewer than 'threshold' datapoints, it will
be grouped into one
Returns
-------
Enriched df
"""
# Identifying zip codes that account for fewer observations than the threshold
zips_to_be_grouped = get_zips_to_be_grouped(df, threshold)
zipcodes = []
for i in range(len(df)):
area = df.zip_code_town[i] # e.g. "2000 Frederiksberg"
if area in zips_to_be_grouped:
if "København V" in area:
zipcodes.append("1600")
if "København K" in area:
zipcodes.append("1300")
if "Frederiksberg C" in area:
zipcodes.append("1900")
if "Rødovre" in area:
zipcodes.append("2610")
else:
# The first word of the string 'area' is the zipcode
zipcode = area[:4]
try:
int(zipcode)
except:
print("{} in row {} of zip_code_town is not a number. Appending NaN".format(zipcode, i))
zipcodes.append("NaN")
zipcodes.append(zipcode)
assert len(zipcodes) == len(df)
df["zipcodes"] = zipcodes
return df
def make_m2_price(df):
df["m2_price"] = df.list_price_dkk / df.home_size_m2
return df
def add_neighboorhood_avg_m2_price(df):
grouped_df = df.groupby("zipcodes")
mean_df = grouped_df.mean()
mean_df = mean_df.reset_index()
zipcode_avg_m2_price = []
for i in range(len(df)):
#print(i)
zipcode = df["zipcodes"][i]
#print("zipcode")
#print(zipcode)
index = mean_df.index[mean_df["zipcodes"] == zipcode] #mean_df["zipcodes"].index(zipcode)
avg_price = float(mean_df.iloc[index, -1])
#print(avg_price)
zipcode_avg_m2_price.append(avg_price)
df["zipcode_avg_m2_price"] = zipcode_avg_m2_price
return df
def add_age(df):
# Adding age feature and removing "built_year"
df["age"] = 2021 - df["built_year"]
df.drop(["built_year"], axis=1, inplace=True)
return df
def remove_nans(df):
# Using heuristics to replace nans with reasonable values
for index in df.index:
if math.isnan(df.loc[index, "rooms"]):
if df.loc[index, "home_size_m2"] < 40:
df.loc[index, "rooms"] = 1 #df.at[home_size_m2_idx, i] = 1
elif df.loc[index, "home_size_m2"] < 70:
df.loc[index, "rooms"] = 2
elif df.loc[index, "home_size_m2"] < 100:
df.loc[index, "rooms"] = 3
else:
df.loc[index, "rooms"] = 4
if math.isnan(df.loc[index, "lotsize_m2"]):
if df.loc[index, "Home_type"] == "Ejerlejlighed" or df.loc[index, "Home_type"] == "Andelsbolig":
df.loc[index, "lotsize_m2"] = 0
else:
df.loc[index, "lotsize_m2"] = df["lotsize_m2"].mean()
if math.isnan(df.loc[index, "expenses_dkk"]):
df.loc[index, "expenses_dkk"] = df["expenses_dkk"].mean()
if math.isnan(df.loc[index, "age"]):
df.loc[index, "age"] = round(df["age"].mean())
# Removing observations for which the any of the relevant variables are null
variables_to_remove_if_null = ["list_price_dkk", "rooms", "home_size_m2"]
for variable in variables_to_remove_if_null:
nulls = pd.isnull(df[variable])
df = df[~nulls]
return df
def onehot_encode_variables(df):
onehot_encoded_variables = []
# One hot encoding zipcodes
zipcodes_onehot = pd.get_dummies(df.zipcodes, prefix="Zipcode")
df = | pd.concat([df, zipcodes_onehot], axis=1) | pandas.concat |
""" Format data """
from __future__ import division, print_function
import pandas as pd
import numpy as np
import re
from os.path import dirname, join
from copy import deepcopy
import lawstructural.lawstructural.constants as lc
import lawstructural.lawstructural.utils as lu
#TODO: Take out entrant stuff from lawData
class Format(object):
""" Basic class for formatting dataset """
def __init__(self):
self.dpath = join(dirname(dirname(__file__)), 'data')
self.data_sets = self.data_imports()
self.data = self.data_sets['usn']
self.ent_data = pd.DataFrame([])
@staticmethod
def _col_fix(col):
""" Fix column strings to be R-readable as well and to be consistent
with older datasets. Think about changing name through rest of program
instead.
"""
col = re.sub('%', 'Percent', col)
col = re.sub('[() -/]', '', col)
if col[0].isdigit():
col = re.sub('thpercentile', '', col)
col = 'p' + col
if col == 'Name':
col = 'school'
if col == 'Issueyear':
col = 'year'
if col == 'Overallrank':
col = 'OverallRank'
return col
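# Examples (added for clarity; outputs follow the substitution rules above):
#   'Employment Rate (%)' -> 'EmploymentRatePercent'
#   '25th percentile'     -> 'p25'
#   'Name'                -> 'school'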
@staticmethod
def _fix_bad_values(data):
""" Fix known USN data typos """
data.loc[(data['school'] == 'University of Miami') &
(data['year'] == 2000), 'Tuitionandfeesfulltime'] = 21000
data.loc[(data['school'] == 'Emory University') &
(data['year'] == 2006), 'Employmentrateatgraduation'] = 72.4
data.loc[(data['school'] == 'Northwestern University') &
(data['year'] == 2006),
'EmploymentRate9MonthsafterGraduation'] = 99.5
data.loc[(data['school'] == 'Michigan State University') &
(data['year'] == 2001), 'BarpassageRateinJurisdiction'] = 75
data.loc[(data['school'] == 'Mississippi College') &
(data['year'] == 2001), 'BarpassageRateinJurisdiction'] = 80
return data
def usn_format(self):
""" Basic USN import and format """
#data = pd.read_csv(join(self.dpath, 'usn2015.csv'))
data = pd.read_csv(join(self.dpath, 'Law1988-2015.csv'))
data = data[['Name', 'Value', 'Metric description', 'Issue year']]
data = pd.pivot_table(data, values='Value',
index=['Name', 'Issue year'],
columns='Metric description')
data = data.reset_index()
names = data.columns.tolist()
data.columns = [self._col_fix(el) for el in names]
data = self._fix_bad_values(data)
data = data.sort(['school', 'year'])
data['year'] = data['year'].astype(int)
return data
def cpi_format(self):
""" Basic CPI import and format """
data = pd.read_csv(join(self.dpath, 'lawCPI.csv'))
# Make up for reporting vs data year in USNews and BLS
data['year'] = data['year'] + 2
data = data[data['year'] <= 2015]
data = data.reset_index(drop=True)
return data
@staticmethod
def _id_name_fix(col):
""" Fix outdated names of schools from id dataset """
#FIXME: Find out why this doesn't work for Drexel, Cath U
old = ['Phoenix School of Law',
'Chapman University',
'Drexel University (Mack)',
'Indiana University--Indianapolis',
'Texas Wesleyan University',
'Catholic University of America (Columbus)',
'John Marshall Law School']
new = ['Arizona Summit Law School',
'Chapman University (Fowler)',
'Drexel University',
'Indiana University--Indianapolis (McKinney)',
'Texas A&M University',
'The Catholic University of America',
'The John Marshall Law School']
for i in xrange(len(old)):
col = re.sub(old[i], new[i], col)
return col
def id_format(self):
""" Import LSAC id's. Note that Irvine doesn't have an id. """
data = pd.read_csv(join(self.dpath, 'USNewsNameStateID.csv'))
data['name'] = [self._id_name_fix(col) for col in data['name']]
return data
def elec_format(self):
""" Import yearly electricity prices """
data = pd.read_csv(join(self.dpath, 'lawElectricity.csv'))
states = pd.read_csv(join(self.dpath, 'lawStateAbbr.csv'))
# Change state names to abbreviations
data = pd.merge(data, states)
data = data.drop('state', 1)
columns = data.columns.tolist()
index = columns.index('abbr')
columns[index] = 'state'
data.columns = columns
data['year'] = data['year'] + 2
return data
def data_imports(self):
""" Import dictionary of initially formatted datasets
Datasets are as follows with corresponding sources/scrapers
usn
---
- Data: US News and World Report
- Source: ai.usnews.com
cpi
---
- Data: CPI data from BLS
- Source: http://data.bls.gov/cgi-bin/dsrv?cu
Series Id: CUUR0000SA0,CUUS0000SA0
Not Seasonally Adjusted
Area: U.S. city average
Item: All items
Base Period: 1982-84=100
Years: 1986 to 2015
- Used to be data.bls.gov/timeseries/LNS14000000
wage
----
- Data: Market wages for lawyers from BLS
- Source: bls.gov/oes
states
------
- Data: US News name/state combinations
- Source: US News Top Law Schools
- Scraper: StateScraper.py
id
--
- Data: US News names and LSAC ID combinations
- Source: http://www.lsac.org/lsacresources/publications/
official-guide-archives
- Scraper: NameScraperLSAC.py
entrants
--------
- Data: School entrants, with id's and dates
- Source: http://www.americanbar.org/groups/legal_education/
resources/aba_approved_law_schools/in_alphabetical_order.html
via
http://en.wikipedia.org/
wiki/List_of_law_schools_in_the_United_States
- Scraper: entryscraper.py
electricity
-----------
- Data: State/Country level electricity prices
- Source: eia.gov/electricity/monthly/backissues.html
- Scraper: ElecScraper.py
Returns
-------
data_sets: dict; data sets from specified sources
"""
data_sets = {
'usn': self.usn_format(),
'cpi': self.cpi_format(),
'states': pd.read_csv(join(self.dpath, 'lawNameState.csv')),
'id': self.id_format(),
'entrants': pd.read_csv(join(self.dpath, 'lawEntrants.csv')),
'electricity': self.elec_format(),
'stateregions': pd.read_csv(join(self.dpath, 'StateRegions.csv')),
'aaup_comp_region': pd.read_csv(join(self.dpath,
'aaup_comp_region.csv')),
'aaup_comp': pd.read_csv(join(self.dpath, 'aaup_comp.csv')),
'aaup_salary_region': pd.read_csv(join(self.dpath,
'aaup_salary_region.csv')),
'aaup_salary': pd.read_csv(join(self.dpath, 'aaup_salary.csv'))
}
return data_sets
def fill_ranks(self):
""" Generate top/bottom/inside/squared rank variables,
fill in unranked schools
"""
# Indicate top/bottom ranked schools
self.data['TopRanked'] = 1 * (self.data['OverallRank'] == 1)
self.data['BottomRanked'] = 1 * (self.data['OverallRank'] ==
np.nanmax(self.data['OverallRank']))
self.data['InsideRanked'] = 1 * ((self.data['OverallRank'] > 1) &
(self.data['OverallRank'] <
np.nanmax(self.data['OverallRank'])))
# Squared rank
self.data['OverallRankSquared'] = self.data['OverallRank']**2
# Fill in un-ranked schools as max(rank) + 1 or lc.UNRANKED
mask = pd.isnull(self.data['OverallRank'])
#unranked = np.nanmax(self.data['OverallRank']) + 1
unranked = lc.UNRANKED
self.data['OverallRank'][mask] = unranked
self.data['Ranked'] = 1 * (self.data['OverallRank'] != unranked)
def combine_tuition(self):
""" Combine Full-time and Out-of-State Tuitions """
self.data['Tuition'] = self.data['Tuitionandfeesfulltime']
self.data['Tuition'] = self.data['Tuition'].fillna(
value=self.data['Outofstatetuitionandfeesfulltime']
)
def lags(self):
""" Generate various lags (including tuition alignment) """
lag_vars = ['OverallRank', 'Ranked']
lag_vars = [lag_vars, lu.reaction_spec('full')[0]]
lag_vars.append(lag_vars[1])
lag_vars[2] = [el + '_comp' for el in lag_vars[2]]
lag_vars = [el for sublist in lag_vars for el in sublist]
for lvar in lag_vars:
self.data[lvar + 'L'] = self.data.groupby('school').apply(
pd.DataFrame.shift)[lvar]
def add_entrants(self):
""" Add indicators for when schools entered """
self.data_sets['entrants']['Founded'] = \
self.data_sets['entrants']['Founded'] + 2
self.data['entry'] = 0
zipped = zip(self.data_sets['entrants']['Founded'],
self.data_sets['entrants']['SchoolUS'])
for enter in zipped:
self.data.loc[(self.data['school'] == enter[1]) &
(self.data['year'] == enter[0]), 'entry'] = 1
def combine_dsets(self):
""" Add in other members of self.data_sets to self.data """
# Location and id
self.data['id'] = 0
for name in self.data['school'].unique():
self.data.ix[self.data['school'] == name, 'state'] = \
self.data_sets['states']['place'].where(
self.data_sets['states']['name'] == name).max()
self.data.ix[self.data['school'] == name, 'id'] = \
self.data_sets['id']['id'].where(
self.data_sets['id']['name'] == name).max()
# Electricity
self.data = pd.merge(self.data, self.data_sets['electricity'],
how='outer', sort=True)
# CPI
self.data = pd.merge(self.data, self.data_sets['cpi'])
# Regions
self.data = pd.merge(self.data, self.data_sets['stateregions'])
# AAUP data sets
aaup_dsets = ['aaup_salary_region',
'aaup_salary']
for dset in aaup_dsets:
self.data = | pd.merge(self.data, self.data_sets[dset], how='outer') | pandas.merge |
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from pyspark import sql
from pysparkhelpers import helpers
@pytest.mark.usefixtures("hive_context")
def test_single(hive_context):
df = pd.DataFrame({'user': ['a', 'a', 'a', 'b'],
'values': [1, 1, 1, 4]})
sdf = hive_context.createDataFrame(df).cache()
def func(x):
return x.sum()
expected = pd.DataFrame({'user': ['a', 'b'], 'values': [3, 4]})
returned_sdf = (
helpers.udaf('user', func, sdf).toPandas()
.sort_values('user')
.reset_index(drop=True)
)
assert_frame_equal(expected, returned_sdf)
returned_rdd = (
helpers.udaf('user', func, sdf.rdd).toPandas()
.sort_values('user')
.reset_index(drop=True)
)
assert_frame_equal(expected, returned_rdd)
def func_value(x):
return 3
expected_value = pd.DataFrame({'user': ['a', 'b'], 'value': [3, 3]})
returned_value = (
helpers.udaf('user', func_value, sdf).toPandas()
.sort_values('user')
.reset_index(drop=True)
)
| assert_frame_equal(expected_value, returned_value) | pandas.util.testing.assert_frame_equal |
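# Note (added, inferred from this test only): helpers.udaf(key, func, data) appears to
# group `data` by `key`, apply `func` to each group as a pandas object, and return a
# Spark DataFrame of the aggregated results.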
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 24 20:03:24 2019
@author: RV
"""
# Python(R)
# Modeules/packageslibraries
# OS - submodules/path/join
#eg. (os.path.join)
# pandas
# scipy
# onspy
#%% Setup
import os
projFld = "C:/Users/RV/Documents/Teaching/2019_01_Spring/ADEC7430_Spring2019/Lecture02"
codeFld = os.path.join(projFld, "PyCode")
fnsFld = os.path.join(codeFld, "_Functions")
outputFld = os.path.join(projFld, "Output")
rawDataFld = os.path.join(projFld, "RawData")
savedDataFld = os.path.join(projFld, "SavedData")
#%% load some functions
fnList = ["fn_logMyInfo"] # this is a list
for fn in fnList:
exec(open(os.path.join(fnsFld, fn + ".py")).read())
# Explain: name, extension name, read+write
# create also a file where we will log some data
logf = os.path.join(rawDataFld, "logfile.csv")
# test writing to log
from _Functions.fn_logMyInfo import fn_logMyInfo
fn_logMyInfo("test writing to log", useConsole=True, useFile=logf)
# can we enhance what we write? How about we add a timestamp?
#%% introduction to datetime
import datetime as DT
# what time is it now?
DT.datetime.now() # do you like the format?
# micro-seconds
# can we format this more friendly?
nowtime = DT.datetime.now()
nowtimef = nowtime.strftime(format="%Y-%m-%d %H:%M:%S") # remember this
nowtimef
type(nowtimef) # so this is a string
# let's add microseconds as well
nowtimef = nowtime.strftime(format="%Y-%m-%d %H:%M:%S.%f") # remember this
nowtimef
#%%
# do you want to keep writing the long formula above? I'd rather write a function
def nowdt():
return(DT.datetime.now().strftime(format="%Y-%m-%d %H:%M:%S.%f"))
nowdt()
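# Example output (illustrative): '2019-01-24 20:03:24.123456'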
#%% now let's add timestamp to our log output
fn_logMyInfo(nowdt() + "," + " second test writing to log", useFile = logf)
# open the log file - do you notice anything unpleasant? e.g. the messages are appended
# can we try to save to a new line?
fn_logMyInfo("\n" + nowdt() + "," + " second test writing to log", useFile = logf)
# this is better... but lengthened our logging function quite a lot
#@@@@ add here a wrapper for this function, with defaults for: newline, timestamp, using given file
# An Excel file doesn't show the microsecond digits, but a text reader will.
#%% Remember how this function works...
#@@@@ how to print the function so we can see how it's put together?
#%%
#==============================================================================
# Exploratory analysis
#==============================================================================
# Data from Kaggle's Titanic challenge comes already split in Train & Test
# See slides - why do we need this split?
# point to the files
rawTrainFile = os.path.join(rawDataFld, "Lecture2_train.csv")
rawTestFile = os.path.join(rawDataFld, "Lecture2_test.csv")
#%% Pandas - transformation and data management, package
# read them into pandas DataFrame
import pandas as pd
rawTrain = pd.read_csv(rawTrainFile, sep=',')
# ParserWarning: Falling back to the "python" engine b/c the "c" engine doesn't support regex separators (separators > 1 char and different from '\s+' are interpreted as regex)
# So you cannot do sep = ',,' and we don't have to do anything with lines '\n'
# If you want to test the warnings you would get when longer separators exist...
# let's understand a bit this DataFrame
# size
rawTrain.shape # how many rows, how many columns?
# print top 7 records
rawTrain.head(7) # notice the dots?
# let's expand the view - need options for pandas printout
pd.set_option('display.width', 1000)
pd.set_option('max_colwidth', 500)
pd.set_option('display.max_columns', 12)
rawTrain.head(7) # does it look better? did the dots vanish or are they still there?
# What is rawTrain?
type(rawTrain)
# list all columns
rawTrain.columns
# list all columns AND their types
rawTrain.dtypes # to think... if CSV is a text file, how did pandas figure out the types?
# does it make sense to have Age "float64"? (fractional) B/C Age has missing values
# int64 doesn't allow for missing values
# Let's force pandas to read everything as a character
rawTrain_c = pd.read_csv(rawTrainFile, sep=',', dtype=object)
rawTrain_c.dtypes
rawTrain_c.head(5)
# for numeric variables, try this:
rawTrain.describe() # anything interesting here? See Age.
# Only numeric data. Missing data in Age b/c less counts than others.
# are there missing values?
pd.isnull(rawTrain)
pd.isnull(rawTrain).astype(int)
| pd.isnull(rawTrain) | pandas.isnull |
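# (Illustrative next step, not in the original script) count missing values per column:
#   pd.isnull(rawTrain).sum()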
import datetime
from typing import List
import pandas as pd
import pytest
from ruamel.yaml import YAML
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
Batch,
BatchDefinition,
BatchSpec,
RuntimeBatchRequest,
)
from great_expectations.core.batch_spec import (
PathBatchSpec,
RuntimeDataBatchSpec,
RuntimeQueryBatchSpec,
S3BatchSpec,
)
from great_expectations.core.id_dict import IDDict
from great_expectations.data_context.types.resource_identifiers import BatchIdentifier
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.datasource import Datasource
from great_expectations.datasource.data_connector import RuntimeDataConnector
yaml = YAML()
@pytest.fixture
def basic_datasource_with_assets(tmp_path_factory):
basic_datasource: Datasource = instantiate_class_from_config(
config=yaml.load(
"""
class_name: Datasource
data_connectors:
runtime:
class_name: RuntimeDataConnector
batch_identifiers:
- hour
- minute
assets:
asset_a:
batch_identifiers:
- day
- month
asset_b:
batch_identifiers:
- day
- month
- year
execution_engine:
class_name: PandasExecutionEngine
""",
),
runtime_environment={
"name": "my_datasource",
},
config_defaults={
"module_name": "great_expectations.datasource",
},
)
return basic_datasource
def test_self_check(basic_datasource):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
assert test_runtime_data_connector.self_check() == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 0,
"data_assets": {},
"example_data_asset_names": [],
"example_unmatched_data_references": [],
"note": "RuntimeDataConnector will not have data_asset_names until they are "
"passed in through RuntimeBatchRequest",
"unmatched_data_reference_count": 0,
}
def test_self_check_named_assets(basic_datasource_with_assets):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
assert test_runtime_data_connector.self_check() == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 2,
"example_data_asset_names": ["asset_a", "asset_b"],
"data_assets": {
"asset_a": {"batch_definition_count": 0, "example_data_references": []},
"asset_b": {"batch_definition_count": 0, "example_data_references": []},
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
def test_new_self_check_after_adding_named_asset_a(
basic_datasource_with_assets, test_df_pandas
):
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
res: List[
BatchDefinition
] = runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_assets.name,
data_connector_name="runtime",
data_asset_name="asset_a",
batch_identifiers={"month": 4, "day": 1},
runtime_parameters={"batch_data": test_df_pandas},
)
)
assert runtime_data_connector.self_check() == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 2,
"example_data_asset_names": ["asset_a", "asset_b"],
"data_assets": {
"asset_a": {
"batch_definition_count": 1,
"example_data_references": ["4-1"],
},
"asset_b": {"batch_definition_count": 0, "example_data_references": []},
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
def test_new_self_check_after_adding_new_asset_c(
basic_datasource_with_assets, test_df_pandas
):
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
res: List[
BatchDefinition
] = runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_assets.name,
data_connector_name="runtime",
data_asset_name="asset_c",
batch_identifiers={"hour": 12, "minute": 15},
runtime_parameters={"batch_data": test_df_pandas},
)
)
assert runtime_data_connector.self_check() == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 3,
"example_data_asset_names": ["asset_a", "asset_b", "asset_c"],
"data_assets": {
"asset_a": {"batch_definition_count": 0, "example_data_references": []},
"asset_b": {"batch_definition_count": 0, "example_data_references": []},
"asset_c": {
"batch_definition_count": 1,
"example_data_references": ["12-15"],
},
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
def test_add_batch_identifiers_correct(basic_datasource_with_assets):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
assert test_runtime_data_connector._batch_identifiers == {
"runtime": ["hour", "minute"],
"asset_a": ["day", "month"],
"asset_b": ["day", "month", "year"],
}
def test_batch_identifiers_missing_completely():
# missing from base DataConnector
with pytest.raises(ge_exceptions.DataConnectorError):
instantiate_class_from_config(
config=yaml.load(
"""
class_name: Datasource
data_connectors:
runtime:
class_name: RuntimeDataConnector
execution_engine:
class_name: PandasExecutionEngine
""",
),
runtime_environment={
"name": "my_datasource",
},
config_defaults={
"module_name": "great_expectations.datasource",
},
)
def test_batch_identifiers_missing_from_named_asset():
with pytest.raises(ge_exceptions.DataConnectorError):
basic_datasource: Datasource = instantiate_class_from_config(
config=yaml.load(
"""
class_name: Datasource
data_connectors:
runtime:
class_name: RuntimeDataConnector
batch_identifiers:
- hour
- minute
assets:
asset_a:
execution_engine:
class_name: PandasExecutionEngine
""",
),
runtime_environment={
"name": "my_datasource",
},
config_defaults={
"module_name": "great_expectations.datasource",
},
)
def test_error_checking_unknown_datasource(basic_datasource):
test_df: pd.DataFrame = | pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}) | pandas.DataFrame |
from dataProcessing import *
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import IsolationForest
import pandas as pd
import pickle
from RandomForestCounterFactual import *
def checkSamples(
datasetFileName,
unscaledFactualsFileName,
unscaledCounterFactualsFileName,
serializedClassifierFileName,
roundMace = False
):
reader = DatasetReader(datasetFileName,rangeFeasibilityForDiscreteFeatures=True)
# Classifier
clfRead, clfScaled = reader.readRandomForestFromPickleAndApplyMinMaxScaling(serializedClassifierFileName)
# Factuals
unscaledFactuals = pd.read_csv(unscaledFactualsFileName)
scaledFactuals = pd.DataFrame()
# Counterfactuals
unscaledCounterFactuals = pd.read_csv(unscaledCounterFactualsFileName)
scaledCounterFactuals = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
.. module:: courtship
:synopsis: Classes for Fly objects.
.. moduleauthor:: <NAME>
"""
import numpy as np
import pandas as pd
from .behavior import Behavior
class Point(object):
"""Stores 2D coordinates as attributes.
Attributes
----------
row : np.ndarray or None (default = None)
Array to store row-coordinates of point
col : np.ndarray or None (default = None)
Array to store col-coordinates of point
"""
def __init__(self):
self.row = None
self.col = None
def coords_rc(self):
"""Stacks row and col coordinates into an [N, 2] np.ndarray.
Returns
-------
coords : np.ndarray of shape [N, 2].
First column contains row-coordinates. Second column contains
col-coordinates.
"""
return np.vstack((self.row, self.col)).T
def coords_xy(self):
"""Stacks col and row coordinates into an [N, 2] np.ndarray.
Returns
-------
coords_xy : np.ndarray of shape [N, 2].
First column contains col-coordinates. Second column contains
row-coordinates.
"""
return np.vstack((self.col, self.row)).T
class Ellipse(object):
"""Base class that will be used to describe body and wing positions.
Attributes
----------
centroid : Point object
Contains centroid coordinates for Ellipse.
minor_axis_length : np.ndarray or None (default = None)
Contains minor axis length of Ellipse.
major_axis_length : np.ndarray or None (default = None)
Contains major axis length of Ellipse.
orientation : np.ndarray or None (default = None)
        Contains the angle (from -np.pi/2 to np.pi/2) that the major axis
        of the Ellipse makes with the Cartesian x-axis.
"""
def __init__(self):
self.centroid = Point()
self.major_axis_length = None
self.minor_axis_length = None
self.orientation = None
def init_params(self, size):
"""Initilizes space to hold ellipse data.
All parameters will be initilized as np.zeros(n).
.. warning:: Any values held within an attribute will
be overriden.
Parameters
----------
size : int
Length of array to initilize for all parameters.
"""
for key in self.__dict__.keys():
if isinstance(self.__dict__[key], Point):
self.__dict__[key].row = np.zeros(size)
self.__dict__[key].col = np.zeros(size)
else:
self.__dict__[key] = np.zeros(size)
def _dig(self, d, all_params):
"""Recursively digs into objects/dictionaries to return all parameters.
Parameters
----------
d : dictionary
Highest level parameter dictionary. Initialize as self.__dict__
to generate a full - deep - parameter list.
all_params : dictionary
Initialize as empty. This will be the final dictionary containing
all parameters.
"""
        for key, val in d.items():
if isinstance(val, Point):
all_params[key] = self._dig(val.__dict__, dict())
else:
all_params[key] = val
return all_params
@staticmethod
def _combine_keys(key_string, add_to_dict):
"""Adds string, key_string, to all keys within a dictionary.
Parameters
----------
key_string : string
String to add to all keys within d.
add_to_dict : dictionary
Dictionary
"""
updated_dict = dict()
        for key, val in add_to_dict.items():
updated_dict[key_string + '_' + key] = val
return updated_dict
def get_params(self, return_dict=True):
"""Gets all parameters as a dictionary.
Parameters
----------
return_dict : bool (default = True)
Whether to return a dictionary or a pandas DataFrame
containing Ellipse data.
"""
params = self._dig(self.__dict__, dict())
if return_dict:
return params
df = pd.DataFrame()
        for k1, v1 in params.items():
            if isinstance(v1, np.ndarray):
                df[k1] = v1
            if isinstance(v1, dict):
                for k2, v2 in self._combine_keys(k1, v1).items():
                    df[k2] = v2
        # return DataFrame where columns have been sorted alphabetically.
        return df.reindex(sorted(df.columns), axis=1)
def area(self):
if self.major_axis_length is None:
return None
half_maj = 0.5 * self.major_axis_length
half_min = 0.5 * self.minor_axis_length
return np.pi * half_maj * half_min
class Body(Ellipse):
"""The body is a distinct type of ellipse with directionality.
Attributes
----------
rotation_angle : np.ndarray or None (default = None)
Angle (from 0 to 2*np.pi) needed to rotate Ellipse such that
the ellipse is oriented with the rear-to-head axis pointing
to the right along the Cartesian x-axis. Clockwise or counter?
head : Point object
Coordinates of head of ellipse.
rear : Point object
Coordinates of rear of ellipse.
"""
def __init__(self):
Ellipse.__init__(self)
self.rotation_angle = None
self.head = Point()
self.rear = Point()
class Wing(Ellipse):
"""Instance of Ellipse."""
def __init__(self):
Ellipse.__init__(self)
class Fly(object):
"""Class used to keep track of features during tracking.
Attributes
----------
body : Body object
Ellipse fitted to body of fly (excludes wings).
left_wing : Wing object
Ellipse fitted to left wing of fly.
right_wing : Wing object
Ellipse fitted to right wing of fly.
n_frames : int
Total number of frames in tracked fly.
timestamps : np.ndarray of shape [n_frames]
Timestamps of video from which fly was tracked.
behaviors : list of Behavior
All behaviors that the fly engaged in during tracking.
"""
def __init__(self, n_frames=None):
"""A fly is composed of three ellipses fitted to (1) the body,
(2) the right wing, and (3) the left wing.
"""
self.body = Body()
self.right_wing = Wing()
self.left_wing = Wing()
self.timestamps = None
self.n_frames = None
self.behaviors = []
if n_frames is not None:
self.init_params(n_frames)
def init_params(self, size):
"""Initializes space for all parameters.
        .. warning:: Any values held within an attribute will
            be overridden. Therefore, only call this function during the
            initialization of a Fly object.
        Parameters
        ----------
        size : int
            Number of frames to initialize space for in each of the
            following attributes:
            body, left_wing and right_wing.
"""
if type(size) != int:
raise AttributeError('`size` must be of type int.')
self.n_frames = size
self.timestamps = np.zeros(size)
self.body.init_params(size)
self.left_wing.init_params(size)
self.right_wing.init_params(size)
def get_behavior(self, name):
"""Gets a specified behavior from this Fly's `behaviors` list.
Parameters
----------
name : string
Name of behavior to return.
Returns
-------
Behavior
Specified behavior.
Raises
------
AttributeError :
If behavior does not exist.
"""
for behav in self.behaviors:
if behav.name == name:
return behav
raise AttributeError("Behavior '{}' not found.".format(name))
def get_all_behaviors_as_dict(self):
"""Gets all of the behaviors contained within this fly and returns
them as a dictionary of binary arrays.
Returns
-------
behaviors : dict
Each key is the behavior name, each value its associated binary
array.
"""
behaviors = {}
for behavior in self.behaviors:
behaviors[behavior.name] = behavior.as_array()
return behaviors
def add_behavior(self, behavior):
"""Adds a specified behavior to this Fly's behavior list.
Parameters
----------
behavior : Behavior
Behavior to add.
"""
if not isinstance(behavior, Behavior):
raise AttributeError('passed `behavior` must be of type ' +
'Behavior.')
self.behaviors.append(behavior)
def add_behavior_from_array(self, behavior_name, behavior_arr):
"""Generates a behavior from a behavioral/classification array, and
adds it to this Fly's behavior list.
Parameters
-----------
behavior_name : string
Name of behavior to add.
behavior_arr : 1d array-like
Array to generate a Behavior from, and add to Fly.behaviors list.
"""
behavior_arr = np.asarray(behavior_arr)
if behavior_arr.size != self.n_frames:
msg = (
'Please assure that `behavior_arr` '+
'contains the correct number of frames. ' +
'n_frames != behavior_arr.size ' +
'({} != {})'.format(self.n_frames, behavior_arr.size)
)
raise AttributeError(msg)
behavior = Behavior.from_array(behavior_name, behavior_arr)
self.behaviors.append(behavior)
def list_behaviors(self):
"""Gets a list of all the current Behavior names in this Fly.
Returns
-------
behavior_names : list of string
Current Behavior names contained in this Fly.
"""
behavior_names = []
for behav in self.behaviors:
behavior_names.append(behav.name)
return behavior_names
def subset(self, start_frame, end_frame):
"""Returns this fly containing only a subset of it's total frames."""
if start_frame >= self.n_frames:
raise AttributeError('`start_frame` must be <= n_frames')
if end_frame >= self.n_frames:
end_frame = self.n_frames
fly_df = self.to_df()
fly_df_subset = fly_df.iloc[start_frame:end_frame, :]
return self.from_df(fly_df_subset)
def from_csv(self, csv_file):
"""Allows the creation of a Fly from a csv file.
.. note:: see Fly.to_df for a list of required column names.
Parameters
----------
csv_file : string
Path to file containing fly data.
"""
fly_df = pd.read_csv(csv_file)
fly = self.from_df(fly_df)
return fly
@classmethod
def from_df(cls, fly_df):
"""Generates a fly object from a dataframe.
Parameters
----------
fly_df : pandas.DataFrame object
DataFrame from which to generate Fly object.
Returns
-------
Fly :
Fly contained within DataFrame.
"""
fly = cls()
for colname in fly_df.columns.values.tolist():
col_id = colname.split('_')
if col_id[0] == 'body':
if 'row' not in col_id and 'col' not in col_id:
setattr(
fly.body,
'_'.join(col_id[1:]),
fly_df[colname].values
)
else:
if 'centroid' in col_id:
setattr(
fly.body.centroid,
col_id[-1],
fly_df[colname].values
)
elif col_id[1] == 'head':
setattr(
fly.body.head,
col_id[-1],
fly_df[colname].values
)
else:
setattr(
fly.body.rear,
col_id[-1],
fly_df[colname].values
)
elif col_id[0] == 'left':
if 'row' not in col_id[-1] and 'col' not in col_id[-1]:
setattr(
fly.left_wing,
'_'.join(col_id[1:]),
fly_df[colname].values
)
else:
setattr(
fly.left_wing.centroid,
col_id[-1],
fly_df[colname].values
)
elif col_id[0] == 'right':
if 'row' not in col_id and 'col' not in col_id:
setattr(
fly.right_wing,
'_'.join(col_id[1:]),
fly_df[colname].values
)
else:
setattr(
fly.right_wing.centroid,
col_id[-1],
fly_df[colname].values
)
elif col_id[0] == 'behavior':
behavior_name = '_'.join(col_id[1:])
new_behavior = Behavior.from_array(
behavior_name,
fly_df[colname].values)
fly.behaviors.append(new_behavior)
elif col_id[0] == 'timestamps':
fly.timestamps = fly_df[colname].values
else:
                print(
                    'UserWarning: Column - {} - '.format(colname) +
                    'found in passed data frame. This column has no ' +
                    'associated attribute in Fly object. Skipping.'
                )
fly.n_frames = fly_df.shape[0]
return fly
def to_csv(self, csv_file):
"""Save Fly in .csv format.
Parameters
----------
csv_file : string
File path to save Fly.
"""
fly_df = self.to_df()
fly_df.to_csv(csv_file, index=False)
def to_df(self):
"""Returns a pandas.DataFrame object containing all information
about this Fly.
.. note:: Columns will have the following names:
1. body_centroid_col
2. body_centroid_row
3. body_head_col
4. body_head_row
5. body_major_axis_length
6. body_minor_axis_length
7. body_orientation
8. body_rear_col
9. body_rear_row
10. body_rotation_angle
11. left_centroid_col
12. left_centroid_row
13. left_major_axis_length
14. left_minor_axis_length
15. left_orientation
16. right_centroid_col
17. right_centroid_row
18. right_major_axis_length
19. right_minor_axis_length
20. right_orientation
21. timestamps
22. behavior_<behavior_1_name> (optional)
.
.
.
These names correspond to the ellipse fitted to the body,
left wing, and right wing of the fly. Columns 21 and on should
represent behaviors associated with this fly object. They are
optional.
Returns
-------
pandas.DataFrame :
DataFrame containing all tracked features of this Fly.
"""
# get parameters for all instances of Ellipse as DataFrames, and
# append a string descriptor to the head of each column in each
# DataFrame.
body_params = self.body.get_params(return_dict=False)
body_params.columns = [
'body_' + c_name for c_name in body_params.columns.values]
left_wing_params = self.left_wing.get_params(return_dict=False)
left_wing_params.columns = [
'left_' + c_name for c_name in left_wing_params.columns.values
]
right_wing_params = self.right_wing.get_params(return_dict=False)
right_wing_params.columns = [
'right_' + c_name for c_name in right_wing_params.columns.values
]
timestamps = pd.DataFrame()
timestamps['timestamps'] = self.timestamps
to_concat = [
body_params,
left_wing_params,
right_wing_params,
timestamps
]
if len(self.behaviors) != 0:
behaviors_df = pd.DataFrame()
for fly_behavior in self.behaviors:
b_arr = fly_behavior.as_array()
b_name = 'behavior_' + fly_behavior.name
behaviors_df[b_name] = b_arr
to_concat.append(behaviors_df)
fly_df = | pd.concat(to_concat, axis=1) | pandas.concat |
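# Minimal usage sketch for the Fly API above; it assumes (as add_behavior_from_array
# requires) that the behavior array length matches n_frames.
example_fly = Fly(n_frames=5)
example_fly.add_behavior_from_array('wing_extension', [0, 1, 1, 0, 0])
example_df = example_fly.to_df()        # columns like 'body_centroid_row', ..., 'behavior_wing_extension'
example_fly2 = Fly.from_df(example_df)  # reconstructs an equivalent Fly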
#!usr/local/bin
import pandas as pd
import numpy as np
from sys import argv
import subprocess
def sequence_splice_site(sp_junc, fasta_file):
part = sp_junc
part = part[['chr','first_base','last_base','motif','STAR_annotation','strand']]
part['first_base'] = part['first_base']
#part['chr'] = 'chr' + part['chr'].astype(str)
part['strand'] = part['strand'].apply(lambda x: '+' if x ==1 else ('-' if x==2 else np.nan))
cat1 = pd.DataFrame(columns = ['chr','start','end','def','ss','strand'])
cat2 = pd.DataFrame(columns = ['chr','start','end','def','ss','strand'])
cat1['chr'] = part['chr']
cat1['start'] = part['first_base'] - 26
cat1['end'] = part['first_base'] +25
cat1['strand'] = part['strand']
cat1['ss'] = part['strand'].apply(lambda x: 3 if x =='-' else 5)
cat1['def'] = part.index +',' + cat1['ss'].astype(str)
cat2['chr'] = part['chr']
cat2['start'] = part['last_base'] -26
cat2['end'] = part['last_base'] +25
cat2['strand'] = part['strand']
cat2['ss'] = part['strand'].apply(lambda x: 5 if x =='-' else 3)
cat2['def'] = part.index +',' + cat2['ss'].astype(str)
fin = | pd.concat([cat1,cat2]) | pandas.concat |
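def write_and_extract_splice_sites(fin, fasta_file, out_prefix='splice_sites'):
    # Hedged sketch, not part of the original script: dump the intervals built by
    # sequence_splice_site as a BED6 file and extract the +/-25 bp splice-site
    # sequences with bedtools getfasta (assumes bedtools is available on PATH).
    bed_path = out_prefix + '.bed'
    fin.to_csv(bed_path, sep='\t', header=False, index=False)
    subprocess.run(['bedtools', 'getfasta', '-fi', fasta_file, '-bed', bed_path,
                    '-s', '-name', '-fo', out_prefix + '.fa'], check=True)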
#https://www.youtube.com/watch?v=xKvffLRSyPk&list=PL3JVwFmb_BnSLFyVThMfEavAEZYHBpWEd&index=1
import os, io
from google.cloud import vision
import pandas as pd
#from google.cloud.vision import types -> `types` is no longer used since the library version upgrade
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'ServiceAccountToken.json'
client = vision.ImageAnnotatorClient()
FILE_NAME = 'jun_name3.png'
FOLDER_PATH = r'C:\Users\Administrator\anaconda3\envs\VisionAPIDemo'
with io.open(os.path.join(FOLDER_PATH, FILE_NAME), 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.text_detection(image=image)
df = | pd.DataFrame(columns=['locale', 'description']) | pandas.DataFrame |
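# Sketch of unpacking the OCR result: each EntityAnnotation in response.text_annotations
# carries `description` (the detected text) and `locale` (the language code, usually only
# populated on the first, full-text annotation).
texts = response.text_annotations
df = pd.DataFrame(
    [{'locale': t.locale, 'description': t.description} for t in texts],
    columns=['locale', 'description'],
)
print(df.head())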
"""
This is a place to create a python wrapper for the BASGRA fortran model in fortarn_BASGRA_NZ
Author: <NAME>
Created: 12/08/2020 9:32 AM
"""
import os
import ctypes as ct
import numpy as np
import pandas as pd
from subprocess import Popen
from copy import deepcopy
from input_output_keys import param_keys, out_cols, days_harvest_keys, matrix_weather_keys_pet, \
matrix_weather_keys_penman
from warnings import warn
# compiled with gfortran 64,
# https://sourceforge.net/projects/mingwbuilds/files/host-windows/releases/4.8.1/64-bit/threads-posix/seh/x64-4.8.1-release-posix-seh-rev5.7z/download
# compilation code: compile_basgra_gfortran.bat
# define the dll library path
_libpath_pet = os.path.join(os.path.dirname(__file__), 'fortran_BASGRA_NZ/BASGRA_pet.DLL')
_libpath_peyman = os.path.join(os.path.dirname(__file__), 'fortran_BASGRA_NZ/BASGRA_peyman.DLL')
_bat_path = os.path.join(os.path.dirname(__file__), 'fortran_BASGRA_NZ\\compile_BASGRA_gfortran.bat')
# this is the maximum number of weather days,
# it is hard coded into fortran_BASGRA_NZ/environment.f95 line 9
_max_weather_size = 36600
def run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False,
dll_path='default', supply_pet=True, auto_harvest=False, run_365_calendar=False):
"""
python wrapper for the fortran BASGRA code
changes to the fortran code may require changes to this function
runs the model for the period of the weather data
:param params: dictionary, see input_output_keys.py, README.md, or
                   https://github.com/Komanawa-Solutions-Ltd/BASGRA_NZ_PY for more details
:param matrix_weather: pandas dataframe of weather data, maximum entries set in _max_weather_size in line 24
of this file (currently 36600)
see documentation for input columns at https://github.com/Komanawa-Solutions-Ltd/BASGRA_NZ_PY
                           or README.md; note expected DOY will change depending on run_365_calendar
:param days_harvest: days harvest dataframe must be same length as matrix_weather entries
see documentation for input columns at https://github.com/Komanawa-Solutions-Ltd/BASGRA_NZ_PY
                         or README.md; note expected DOY will change depending on run_365_calendar
:param doy_irr: a list of the days of year to irrigate on, must be integers acceptable values: (0-366)
:param verbose: boolean, if True the fortran function prints a number of statements for debugging purposes
(depreciated)
:param dll_path: path to the compiled fortran DLL to use, default was made on windows 10 64 bit, if the path does
not exist, this function will try to run the bat file to re-make the dll.
:param supply_pet: boolean, if True BASGRA expects pet to be supplied, if False the parameters required to
calculate pet from the peyman equation are expected,
the version must match the DLL if dll_path != 'default'
:param auto_harvest: boolean, if True then assumes data is formated correctly for auto harvesting, if False, then
assumes data is formatted for manual harvesting (e.g. previous version) and re-formats
internally
:param run_365_calendar: boolean, if True then run on a 365 day calender
This expects that all leap days will be removed from matrix_weather and
days_harvest. DOY is expected to be between 1 and 365. This means that datetime
objects defined by year and doy will be incorrect. instead use
get_month_day_to_nonleap_doy to map DOY to datetime via month and day. This is how
the index of the returned datetime will be passed. For example for date 2024-03-01
(2024 is a leap year) the dayofyear via a datetime object will be 61, but if
expect_no_leap_days=True basgra expects day of year to be 60. the index of the
results will be a datetime object of equivalent to 2024-03-01, so the output doy
will not match the index doy and there will be no value on 2020-02-29.
default False
:return: pd.DataFrame(index=datetime index, columns = out_cols)
"""
assert isinstance(supply_pet, bool), 'supply_pet param must be boolean'
assert isinstance(auto_harvest, bool), 'auto_harvest param must be boolean'
    assert isinstance(run_365_calendar, bool), 'run_365_calendar must be boolean'
# define DLL library path
use_default_lib = False
if dll_path == 'default':
use_default_lib = True
if supply_pet:
dll_path = _libpath_pet
else:
dll_path = _libpath_peyman
# check that library path exists
if not os.path.exists(dll_path):
if use_default_lib:
# try to run the bat file
print('dll not found, trying to run bat to create DLL:\n{}'.format(_bat_path))
p = Popen(os.path.basename(_bat_path), cwd=os.path.dirname(_bat_path), shell=True)
stdout, stderr = p.communicate()
print('output of bat:\n{}\n{}'.format(stdout, stderr))
if not os.path.exists(dll_path):
raise EnvironmentError('default DLL path not found:\n'
'{}\n'
'see readme for more details:\n'
'{}'.format(dll_path, os.path.dirname(__file__) + 'README.md'))
else:
raise EnvironmentError('DLL path not found:\n{}'.format(dll_path))
# define expected weather keys
if supply_pet:
_matrix_weather_keys = matrix_weather_keys_pet
else:
_matrix_weather_keys = matrix_weather_keys_penman
doy_irr = np.atleast_1d(doy_irr)
# test the input variables
_test_basgra_inputs(params, matrix_weather, days_harvest, verbose, _matrix_weather_keys,
auto_harvest, doy_irr, run_365_calendar=run_365_calendar)
nout = len(out_cols)
ndays = len(matrix_weather)
nirr = len(doy_irr)
# define output indexes before data manipulation
out_index = matrix_weather.index
# copy everything and ensure order is correct
params = deepcopy(params)
matrix_weather = deepcopy(matrix_weather.loc[:, _matrix_weather_keys])
days_harvest = deepcopy(days_harvest.loc[:, days_harvest_keys])
# translate manual harvest inputs into fortran format
if not auto_harvest:
days_harvest = _trans_manual_harv(days_harvest, matrix_weather)
# get variables into right python types
params = np.array([params[e] for e in param_keys]).astype(float)
matrix_weather = matrix_weather.values.astype(float)
days_harvest = days_harvest.values.astype(float)
doy_irr = doy_irr.astype(np.int32)
# manage weather size,
weather_size = len(matrix_weather)
if weather_size < _max_weather_size:
temp = np.zeros((_max_weather_size - weather_size, matrix_weather.shape[1]), float)
matrix_weather = np.concatenate((matrix_weather, temp), 0)
y = np.zeros((ndays, nout), float) # cannot set these to nan's or it breaks fortran
# make pointers
# arrays # 99% sure this works
params_p = np.asfortranarray(params).ctypes.data_as(ct.POINTER(ct.c_double)) # 1d array, float
matrix_weather_p = np.asfortranarray(matrix_weather).ctypes.data_as(ct.POINTER(ct.c_double)) # 2d array, float
days_harvest_p = np.asfortranarray(days_harvest).ctypes.data_as(ct.POINTER(ct.c_double)) # 2d array, float
y_p = np.asfortranarray(y).ctypes.data_as(ct.POINTER(ct.c_double)) # 2d array, float
doy_irr_p = np.asfortranarray(doy_irr).ctypes.data_as(ct.POINTER(ct.c_long))
# integers
ndays_p = ct.pointer(ct.c_int(ndays))
nirr_p = ct.pointer(ct.c_int(nirr))
nout_p = ct.pointer(ct.c_int(nout))
verb_p = ct.pointer(ct.c_bool(verbose))
# load DLL
for_basgra = ct.CDLL(dll_path)
# run BASGRA
for_basgra.BASGRA_(params_p, matrix_weather_p, days_harvest_p, ndays_p, nout_p, nirr_p, doy_irr_p, y_p, verb_p)
# format results
y_p = np.ctypeslib.as_array(y_p, (ndays, nout))
y_p = y_p.flatten(order='C').reshape((ndays, nout), order='F')
y_p = pd.DataFrame(y_p, out_index, out_cols)
if run_365_calendar:
mapper = get_month_day_to_nonleap_doy(key_doy=True)
strs = [f'{y}-{mapper[doy][0]:02d}-{mapper[doy][1]:02d}' for y, doy in zip(y_p.year.values.astype(int),
y_p.doy.values.astype(int))]
y_p.loc[:, 'date'] = pd.to_datetime(strs)
else:
strs = ['{}-{:03d}'.format(int(e), int(f)) for e, f in y_p[['year', 'doy']].itertuples(False, None)]
y_p.loc[:, 'date'] = | pd.to_datetime(strs, format='%Y-%j') | pandas.to_datetime |
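# Hedged usage sketch for run_basgra_nz; the required column sets live in
# input_output_keys.py, so the inputs below are placeholders rather than working values:
#     params = {k: ... for k in param_keys}
#     matrix_weather = pd.DataFrame(..., columns=matrix_weather_keys_pet)
#     days_harvest = pd.DataFrame(..., columns=days_harvest_keys)
#     out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr=[305, 335],
#                         supply_pet=True)  # datetime-indexed DataFrame with out_cols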
import os
import uuid
from datetime import datetime
from time import sleep
import fsspec
import pandas as pd
import pytest
import v3iofs
from storey import EmitEveryEvent
import mlrun
import mlrun.feature_store as fs
from mlrun import store_manager
from mlrun.datastore.sources import CSVSource, ParquetSource
from mlrun.datastore.targets import CSVTarget, NoSqlTarget, ParquetTarget
from mlrun.features import Entity
from tests.system.base import TestMLRunSystem
@TestMLRunSystem.skip_test_if_env_not_configured
# Marked as enterprise because of v3io mount and remote spark
@pytest.mark.enterprise
class TestFeatureStoreSparkEngine(TestMLRunSystem):
project_name = "fs-system-spark-engine"
spark_service = ""
pq_source = "testdata.parquet"
csv_source = "testdata.csv"
spark_image_deployed = (
False # Set to True if you want to avoid the image building phase
)
test_branch = "" # For testing specific branch. e.g.: "https://github.com/mlrun/mlrun.git@development"
@classmethod
def _init_env_from_file(cls):
env = cls._get_env_from_file()
cls.spark_service = env["MLRUN_SYSTEM_TESTS_DEFAULT_SPARK_SERVICE"]
def get_local_pq_source_path(self):
return os.path.relpath(str(self.assets_path / self.pq_source))
def get_remote_pq_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.pq_source
return path
def get_local_csv_source_path(self):
return os.path.relpath(str(self.assets_path / self.csv_source))
def get_remote_csv_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.csv_source
return path
def custom_setup(self):
from mlrun import get_run_db
from mlrun.run import new_function
from mlrun.runtimes import RemoteSparkRuntime
self._init_env_from_file()
if not self.spark_image_deployed:
store, _ = store_manager.get_or_create_store(
self.get_remote_pq_source_path()
)
store.upload(
self.get_remote_pq_source_path(without_prefix=True),
self.get_local_pq_source_path(),
)
store, _ = store_manager.get_or_create_store(
self.get_remote_csv_source_path()
)
store.upload(
self.get_remote_csv_source_path(without_prefix=True),
self.get_local_csv_source_path(),
)
if not self.test_branch:
RemoteSparkRuntime.deploy_default_image()
else:
sj = new_function(
kind="remote-spark", name="remote-spark-default-image-deploy-temp"
)
sj.spec.build.image = RemoteSparkRuntime.default_image
sj.with_spark_service(spark_service="dummy-spark")
sj.spec.build.commands = ["pip install git+" + self.test_branch]
sj.deploy(with_mlrun=False)
get_run_db().delete_function(name=sj.metadata.name)
self.spark_image_deployed = True
def test_basic_remote_spark_ingest(self):
key = "patient_id"
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
assert measurements.status.targets[0].run_id is not None
def test_basic_remote_spark_ingest_csv(self):
key = "patient_id"
name = "measurements"
measurements = fs.FeatureSet(
name,
entities=[fs.Entity(key)],
engine="spark",
)
source = CSVSource(
"mycsv", path=self.get_remote_csv_source_path(), time_field="timestamp"
)
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
features = [f"{name}.*"]
vec = fs.FeatureVector("test-vec", features)
resp = fs.get_offline_features(vec)
df = resp.to_dataframe()
assert type(df["timestamp"][0]).__name__ == "Timestamp"
def test_error_flow(self):
df = pd.DataFrame(
{
"name": ["Jean", "Jacques", "Pierre"],
"last_name": ["Dubois", "Dupont", "Lavigne"],
}
)
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity("name")],
engine="spark",
)
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
fs.ingest(
measurements,
df,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
def test_ingest_to_csv(self):
key = "patient_id"
csv_path_spark = "v3io:///bigdata/test_ingest_to_csv_spark"
csv_path_storey = "v3io:///bigdata/test_ingest_to_csv_storey.csv"
measurements = fs.FeatureSet(
"measurements_spark",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_spark)]
fs.ingest(
measurements,
source,
targets,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
csv_path_spark = measurements.get_target_path(name="csv")
measurements = fs.FeatureSet(
"measurements_storey",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_storey)]
fs.ingest(
measurements,
source,
targets,
)
csv_path_storey = measurements.get_target_path(name="csv")
read_back_df_spark = None
file_system = fsspec.filesystem("v3io")
for file_entry in file_system.ls(csv_path_spark):
filepath = file_entry["name"]
if not filepath.endswith("/_SUCCESS"):
read_back_df_spark = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_spark is not None
read_back_df_storey = None
for file_entry in file_system.ls(csv_path_storey):
filepath = file_entry["name"]
read_back_df_storey = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_storey is not None
assert read_back_df_spark.sort_index(axis=1).equals(
read_back_df_storey.sort_index(axis=1)
)
@pytest.mark.parametrize("partitioned", [True, False])
def test_schedule_on_filtered_by_time(self, partitioned):
name = f"sched-time-{str(partitioned)}"
now = datetime.now()
path = "v3io:///bigdata/bla.parquet"
fsys = fsspec.filesystem(v3iofs.fs.V3ioFS.protocol)
pd.DataFrame(
{
"time": [
pd.Timestamp("2021-01-10 10:00:00"),
pd.Timestamp("2021-01-10 11:00:00"),
],
"first_name": ["moshe", "yosi"],
"data": [2000, 10],
}
).to_parquet(path=path, filesystem=fsys)
cron_trigger = "*/3 * * * *"
source = ParquetSource(
"myparquet", path=path, time_field="time", schedule=cron_trigger
)
feature_set = fs.FeatureSet(
name=name,
entities=[fs.Entity("first_name")],
timestamp_key="time",
engine="spark",
)
if partitioned:
targets = [
NoSqlTarget(),
ParquetTarget(
name="tar1",
path="v3io:///bigdata/fs1/",
partitioned=True,
partition_cols=["time"],
),
]
else:
targets = [
ParquetTarget(
name="tar2", path="v3io:///bigdata/fs2/", partitioned=False
),
NoSqlTarget(),
]
fs.ingest(
feature_set,
source,
run_config=fs.RunConfig(local=False),
targets=targets,
spark_context=self.spark_service,
)
# ingest starts every third minute and it can take ~150 seconds to finish.
time_till_next_run = 180 - now.second - 60 * (now.minute % 3)
sleep(time_till_next_run + 150)
features = [f"{name}.*"]
vec = fs.FeatureVector("sched_test-vec", features)
with fs.get_online_feature_service(vec) as svc:
resp = svc.get([{"first_name": "yosi"}, {"first_name": "moshe"}])
assert resp[0]["data"] == 10
assert resp[1]["data"] == 2000
pd.DataFrame(
{
"time": [
| pd.Timestamp("2021-01-10 12:00:00") | pandas.Timestamp |
#modules to initialize the API
#the API will run on two endpoints:
# Students - to get the user's course code
# Lessons - to access the timetable details for which the alerts will be sent
#with the two endpoints, if the API is located at www.api.com
# comm. with the Students and Lessons classes will be at
# www.api.com/students and www.api.com/classes respectively.
from flask import Flask, app
from flask_restful import Resource, Api, reqparse
import pandas as pd
import ast
#from flask_cors import CORS
app = Flask(__name__)
api = Api(app)
#To create an endpoint, define a Python class with the name
#you want and connect it to the desired endpoint.
#we pass 'Resource' with the class definition for Flask to know that this is an endpoint.
class Students(Resource):
#using the GET method, we return the data stored in the specified file
def get(self):
data = pd.read_csv('studentDetails.csv') #reading the csv file
data = data.to_dict() #converting the dataframe to a dictionary
return{'data': data}, 200 #return the data along with the HTTP OK code
def post(self):
"""post student class code to the db"""
parser = reqparse.RequestParser()
#making required=True means that the argument is necessary in the post request
parser.add_argument('class_code', required = True)
args = parser.parse_args()
#read from the saved class codes
data = | pd.read_json('classCodes.json') | pandas.read_json |
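# Sketch of the wiring described in the comments at the top of this file: each
# Resource class is registered on its endpoint and the development server is started.
# A Lessons resource is assumed to exist alongside Students but is not defined in this snippet.
api.add_resource(Students, '/students')
# api.add_resource(Lessons, '/lessons')  # assumed second endpoint
if __name__ == '__main__':
    app.run(debug=True)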
#!/usr/bin/env python
import os
import sys
import pandas as pd
import argparse
import configparser
import multiprocessing
import time
import datetime
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
import tqdm
import statsmodels.stats.multitest as multitest
import snps
import genes
import interactions
import summary
import eqtls
import aFC
def parse_tissues(user_tissues, match_tissues, eqtl_project, db):
if eqtl_project:
sql = '''SELECT * FROM meta_eqtls WHERE project = '{}' '''.format(
eqtl_project)
else:
sql = '''SELECT * FROM meta_eqtls'''
df = pd.DataFrame()
with db.connect() as con:
df = pd.read_sql(sql, con=con)
db.dispose()
tissues = []
if match_tissues:
user_tissues = match_tissues[0]
if user_tissues:
matched_df = []
matched_tissues = []
to_omit = []
not_tissues = []
for u_tissue in user_tissues:
u_df = df[
(df['name'] == u_tissue) |
(df['tags'].str.contains(
r'\b{}\b'.format(u_tissue), case=False))
]
if u_df.empty:
if u_tissue.startswith('-'):
to_omit.append(u_tissue)
else:
not_tissues.append(u_tissue)
else:
matched_df.append(u_df)
matched_tissues.append(u_tissue)
error_msg = 'Program aborting:\n\t{}\nnot found in database.'
if (len(matched_df) == 0 or len(not_tissues) > 0) and len(to_omit) == 0:
print(error_msg.format('\n\t'.join(not_tissues)))
print('\nPlease use one of the following. ' +
'Tissue names are case sensitive:')
list_eqtl_tissues(db)
sys.exit()
user_df = pd.DataFrame()
if len(to_omit) > 0 and len(matched_tissues) == 0:
user_df = df
else:
user_df = pd.concat(matched_df)
if match_tissues:
for i in range(len(matched_tissues)):
user_df = user_df[
user_df['tags'].str.contains(
r'\b{}\b'.format(matched_tissues[i]), case=False)]
user_df = user_df.drop_duplicates()
for i in range(len(to_omit)):
user_df = user_df[
~user_df['tags'].str.contains(
r'\b{}\b'.format(to_omit[i][1:]), case=False)]
if len(user_df['project'].drop_duplicates()) > 1 and not eqtl_project:
# Ensure tissues are from same eQTL project
print('FATAL: eQTL tissues are from different projects. ',
'Add another tag to fine-tune match',
'or use \'--eqtl-project\' to specify project.')
print(user_df[['name', 'project']].to_string(index=False))
sys.exit()
tissues = user_df[['name', 'project']]
else: # Use GTEx database as default
tissues = df[df['project'] == 'GTEx'][[
'name', 'project']]
return tissues
def parse_hic(
match_tissues,
include_cell_line,
exclude_cell_line,
restriction_enzymes,
db):
    ''' Parse user parameters -r, -n and -x.
    Args:
        restriction_enzymes: space-delimited list of restriction enzymes from
          user. Limits program to Hi-C libraries restricted by specified enzyme.
        include_cell_line: space-delimited list of cell_lines from -n.
        exclude_cell_line: space-delimited list of cell_lines from -x.
    Returns:
        hic_df: a dataframe with columns (library, enzyme, rep_count)
    '''
sql = '''SELECT library, tags, enzyme, rep_count FROM meta_hic'''
df = pd.DataFrame()
with db.connect() as con:
df = | pd.read_sql(sql, con=con) | pandas.read_sql |
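# A sketch of the filtering the docstring above describes (an assumption about the
# omitted remainder of parse_hic, not the original code):
#     if restriction_enzymes:
#         df = df[df['enzyme'].isin(restriction_enzymes)]
#     if exclude_cell_line:
#         df = df[~df['library'].isin(exclude_cell_line)]
#     if include_cell_line:
#         df = df[df['library'].isin(include_cell_line)]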
################################################################################
# The contents of this file are Teradata Public Content and have been released
# to the Public Domain.
# <NAME> & <NAME> - April 2020 - v.1.1
# Copyright (c) 2020 by Teradata
# Licensed under BSD; see "license.txt" file in the bundle root folder.
#
################################################################################
# R and Python TechBytes Demo - Part 5: Python in-nodes with SCRIPT
# ------------------------------------------------------------------------------
# File: stoRFScoreMM.py
# ------------------------------------------------------------------------------
# The R and Python TechBytes Demo comprises of 5 parts:
# Part 1 consists of only a Powerpoint overview of R and Python in Vantage
# Part 2 demonstrates the Teradata R package tdplyr for clients
# Part 3 demonstrates the Teradata Python package teradataml for clients
# Part 4 demonstrates using R in-nodes with the SCRIPT and ExecR Table Operators
# Part 5 demonstrates using Python in-nodes with the SCRIPT Table Operator
################################################################################
#
# This TechBytes demo utilizes a use case to predict the propensity of a
# financial services customer base to open a credit card account.
#
# The present file is the Python scoring script to be used with the SCRIPT
# table operator, as described in the following use case 2 of the present demo
# Part 5:
#
# 2) Fitting and scoring multiple models
#
# We utilize the statecode variable as a partition to built a Random
# Forest model for every state. This is done by using SCRIPT Table Operator
# to run a model fitting script with a PARTITION BY statecode in the query.
# This creates a model for each of the CA, NY, TX, IL, AZ, OH and Other
# state codes, and perists the model in the database via CREATE TABLE AS
# statement.
# Then we run a scoring script via the SCRIPT Table Operator against
# these persisted Random Forest models to score the entire data set.
#
# For this use case, we build an analytic data set nearly identical to the
# one in the teradataml demo (Part 3), with one change as indicated by item
# (d) below. This is so we can demonstrate the in-database capability of
# simultaneously building many models.
# 60% of the analytic data set rows are sampled to create a training
# subset. The remaining 40% is used to create a testing/scoring dataset.
# The train and test/score datasets are used in the SCRIPT operations.
################################################################################
# File Changelog
# v.1.0 2019-10-29 First release
# v.1.1 2020-04-02 Added change log; no code changes in present file
################################################################################
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import pickle
import base64
###
### Read input
###
delimiter = '\t'
inputData = []
try:
line = input()
if line == '': # Exit if user provides blank line
pass
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
modelSerB64 = allArgs[-1]
except (EOFError): # Exit if reached EOF or CTRL-D
pass
while 1:
try:
line = input()
if line == '': # Exit if user provides blank line
break
else:
allArgs = line.split(delimiter)
inputData.append(allArgs[0:-2])
except (EOFError): # Exit if reached EOF or CTRL-D
break
#for line in sys.stdin.read().splitlines():
# line = line.split(delimiter)
# inputData.append(line)
###
### If no data received, gracefully exit rather than producing an error later.
###
if not inputData:
sys.exit()
## In the input information, all rows have the same number of column elements
## except for the first row. The latter also contains the model info in its
## last column. Isolate the serialized model from the end of first row.
#modelSerB64 = inputData[0][-1]
###
### Set up input DataFrame according to input schema
###
# Know your data: You must know in advance the number and data types of the
# incoming columns from the database!
# For numeric columns, the database sends in floats in scientific format with a
# blank space when the exponential is positive; e.g., 1.0 is sent as 1.000E 000.
# The following input data read deals with any such blank spaces in numbers.
columns = ['cust_id', 'tot_income', 'tot_age', 'tot_cust_years', 'tot_children',
'female_ind', 'single_ind', 'married_ind', 'separated_ind',
'statecode', 'ck_acct_ind', 'sv_acct_ind', 'cc_acct_ind',
'ck_avg_bal', 'sv_avg_bal', 'cc_avg_bal', 'ck_avg_tran_amt',
'sv_avg_tran_amt', 'cc_avg_tran_amt', 'q1_trans_cnt',
'q2_trans_cnt', 'q3_trans_cnt', 'q4_trans_cnt', 'SAMPLE_ID']
df = pd.DataFrame(inputData, columns=columns)
#df = pd.DataFrame.from_records(inputData, exclude=['nRow', 'model'], columns=columns)
del inputData
df['cust_id'] = pd.to_numeric(df['cust_id'])
df['tot_income'] = df['tot_income'].apply(lambda x: "".join(x.split()))
df['tot_income'] = pd.to_numeric(df['tot_income'])
df['tot_age'] = pd.to_numeric(df['tot_age'])
df['tot_cust_years'] = | pd.to_numeric(df['tot_cust_years']) | pandas.to_numeric |
import numpy as np
import pandas as pd
import time
import cv2
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.nn.init
from torch.autograd import Variable
from . models import Spatial_CNN
class TESLA(object):
def __init__(self):
super(TESLA, self).__init__()
def train(self, input,
use_cuda=False,
train_refine=True,
radius=3,
nChannel=100,
lr=0.1,
minLabels=20,
maxIter=30,
stepsize_sim=1,
stepsize_con=10,
threshold=1000,
plot_intermedium=False,
plot_dir="./"):
self.use_cuda=use_cuda
self.train_refine=train_refine
self.radius=radius
self.nChannel=nChannel
self.lr=lr
self.minLabels=minLabels
self.maxIter=maxIter
self.stepsize_sim=stepsize_sim
self.stepsize_con=stepsize_con
self.threshold=threshold
self.plot_intermedium=plot_intermedium
self.resize_height=input.shape[0]
self.resize_width=input.shape[1]
input = torch.from_numpy( np.array([input.transpose( (2, 0, 1) ).astype('float32')]) )
if use_cuda:
input = input.cuda()
input = Variable(input)
#--------------------------------------- Train ---------------------------------------
self.model = Spatial_CNN(input.size(1), nConv=2, nChannel=self.nChannel, kernel_size_list=[5, 5],stride_list=[1, 1], padding_list=[2, 2])
if use_cuda:
self.model.cuda()
# Similarity loss definition
loss_fn = torch.nn.CrossEntropyLoss()
# Continuity loss definition
        loss_hpy = torch.nn.L1Loss(reduction='mean')
        loss_hpz = torch.nn.L1Loss(reduction='mean')
HPy_target = torch.zeros(self.resize_height-1, self.resize_width, nChannel)
HPz_target = torch.zeros(self.resize_height, self.resize_width-1, nChannel)
if use_cuda:
HPy_target = HPy_target.cuda()
HPz_target = HPz_target.cuda()
optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=0.9)
label_colours = np.random.randint(255,size=(100,3))
start_time = time.time()
self.model.train()
for batch_idx in range(maxIter):
# forwarding
optimizer.zero_grad()
output = self.model( input )[ 0 ]
output = output.permute( 1, 2, 0 ).contiguous().view( -1, nChannel )
outputHP = output.reshape( (self.resize_height, self.resize_width, nChannel) )
HPy = outputHP[1:, :, :] - outputHP[0:-1, :, :]
HPz = outputHP[:, 1:, :] - outputHP[:, 0:-1, :]
lhpy = loss_hpy(HPy,HPy_target)
lhpz = loss_hpz(HPz,HPz_target)
_, target = torch.max( output, 1 )
img_target = target.data.cpu().numpy()
# Total number of clusters
nLabels = len(np.unique(img_target))
# Number of main clusters
mainLabels=(pd.Series(img_target).value_counts()>=threshold).sum()
#--------------------------Refine during training----------------------------------------
if train_refine:
pixel_num=pd.Series(img_target).value_counts()
main_clusters=pixel_num.index[pixel_num>=threshold].tolist()
minor_clusters=pixel_num.index[pixel_num<threshold].tolist()
b_refine = img_target.reshape( (self.resize_height, self.resize_width))
                max_x, max_y = self.resize_height, self.resize_width
replace_map={}
for i in minor_clusters:
nbs=[]
xy=np.where(b_refine==i)
for j in range(len(xy[0])):
x, y=xy[0][j], xy[1][j]
nbs=nbs+b_refine[max(0,x-radius):min(max_x,x+radius+1),max(0,y-radius):min(max_y,y+radius+1)].flatten().tolist()
nbs_num= | pd.Series(nbs) | pandas.Series |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = | pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None]) | pandas.MultiIndex.from_tuples |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .plan_losses import PPC, PlanCost,get_leading_hint
from query_representation.utils import deterministic_hash,make_dir
from query_representation.viz import *
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import multiprocessing as mp
import random
from collections import defaultdict
import pandas as pd
import networkx as nx
import os
import pdb
def get_eval_fn(loss_name):
if loss_name == "qerr":
return QError()
elif loss_name == "abs":
return AbsError()
elif loss_name == "rel":
return RelativeError()
elif loss_name == "ppc":
return PostgresPlanCost()
elif loss_name == "plancost":
return SimplePlanCost()
elif loss_name == "flowloss":
return FlowLoss()
else:
assert False
class EvalFunc():
def __init__(self, **kwargs):
pass
def save_logs(self, qreps, errors, **kwargs):
result_dir = kwargs["result_dir"]
if result_dir is None:
return
if "samples_type" in kwargs:
samples_type = kwargs["samples_type"]
else:
samples_type = ""
resfn = os.path.join(result_dir, self.__str__() + ".csv")
res = pd.DataFrame(data=errors, columns=["errors"])
res["samples_type"] = samples_type
# TODO: add other data?
if os.path.exists(resfn):
res.to_csv(resfn, mode="a",header=False)
else:
res.to_csv(resfn, header=True)
def eval(self, qreps, preds, **kwargs):
'''
@qreps: [qrep_1, ...qrep_N]
@preds: [{},...,{}]
@ret: [qerror_1, ..., qerror_{num_subplans}]
Each query has multiple subplans; the returned list flattens it into a
single array. The subplans of a query are sorted alphabetically (see
_get_all_cardinalities)
'''
pass
def __str__(self):
return self.__class__.__name__
# TODO: stuff for saving logs
def fix_query(query):
    # these conditions were needed due to some edge cases while generating the
    # queries on the movie_info_idx table, but they crash psycopg2 somewhere.
    # Removing them shouldn't affect the queries.
bad_str1 = "mii2.info ~ '^(?:[1-9]\d*|0)?(?:\.\d+)?$' AND"
bad_str2 = "mii1.info ~ '^(?:[1-9]\d*|0)?(?:\.\d+)?$' AND"
if bad_str1 in query:
query = query.replace(bad_str1, "")
if bad_str2 in query:
query = query.replace(bad_str2, "")
return query
def _get_all_cardinalities(qreps, preds):
ytrue = []
yhat = []
for i, pred_subsets in enumerate(preds):
qrep = qreps[i]["subset_graph"].nodes()
keys = list(pred_subsets.keys())
keys.sort()
for alias in keys:
pred = pred_subsets[alias]
actual = qrep[alias]["cardinality"]["actual"]
if actual == 0:
actual += 1
ytrue.append(float(actual))
yhat.append(float(pred))
return np.array(ytrue), np.array(yhat)
class QError(EvalFunc):
def eval(self, qreps, preds, **kwargs):
'''
'''
assert len(preds) == len(qreps)
assert isinstance(preds[0], dict)
ytrue, yhat = _get_all_cardinalities(qreps, preds)
assert len(ytrue) == len(yhat)
assert 0.00 not in ytrue
assert 0.00 not in yhat
errors = np.maximum((ytrue / yhat), (yhat / ytrue))
self.save_logs(qreps, errors, **kwargs)
return errors
class AbsError(EvalFunc):
def eval(self, qreps, preds, **kwargs):
'''
'''
assert len(preds) == len(qreps)
assert isinstance(preds[0], dict)
ytrue, yhat = _get_all_cardinalities(qreps, preds)
errors = np.abs(yhat - ytrue)
return errors
class RelativeError(EvalFunc):
def eval(self, qreps, preds, **kwargs):
'''
'''
assert len(preds) == len(qreps)
assert isinstance(preds[0], dict)
ytrue, yhat = _get_all_cardinalities(qreps, preds)
# TODO: may want to choose a minimum estimate
# epsilons = np.array([1]*len(yhat))
# ytrue = np.maximum(ytrue, epsilons)
errors = np.abs(ytrue - yhat) / ytrue
return errors
class PostgresPlanCost(EvalFunc):
def save_logs(self, qreps, errors, **kwargs):
if "result_dir" not in kwargs:
return
result_dir = kwargs["result_dir"]
if result_dir is None:
return
sqls = kwargs["sqls"]
plans = kwargs["plans"]
opt_costs = kwargs["opt_costs"]
true_cardinalities = kwargs["true_cardinalities"]
est_cardinalities = kwargs["est_cardinalities"]
costs = errors
if "samples_type" in kwargs:
samples_type = kwargs["samples_type"]
else:
samples_type = ""
if "alg_name" in kwargs:
alg_name = kwargs["alg_name"]
else:
alg_name = "Est"
costs_fn = os.path.join(result_dir, self.__str__() + ".csv")
if os.path.exists(costs_fn):
costs_df = pd.read_csv(costs_fn)
else:
columns = ["qname", "join_order", "exec_sql", "cost"]
costs_df = pd.DataFrame(columns=columns)
cur_costs = defaultdict(list)
for i, qrep in enumerate(qreps):
# sql_key = str(deterministic_hash(qrep["sql"]))
# cur_costs["sql_key"].append(sql_key)
qname = os.path.basename(qrep["name"])
cur_costs["qname"].append(qname)
joinorder = get_leading_hint(qrep["join_graph"], plans[i])
cur_costs["join_order"].append(joinorder)
cur_costs["exec_sql"].append(sqls[i])
cur_costs["cost"].append(costs[i])
cur_df = | pd.DataFrame(cur_costs) | pandas.DataFrame |
import cbsodata
import math
import pandas
import matplotlib.pyplot as plt
from functools import reduce
from pathlib import Path
from sklearn.preprocessing import MinMaxScaler
def download(identifier: str):
"""Download a dataset from the CBS odata portal."""
# Prepare download directory
download_directory = Path("data/downloaded")
download_directory.mkdir(parents=True, exist_ok=True)
# Check if dataset was previously downloaded
file_path = download_directory / (identifier + ".csv")
if not file_path.exists():
print(f"Downloading \"{identifier}\" to \"{file_path}\".")
# Download dataset
data = cbsodata.get_data(identifier)
data_frame = pandas.DataFrame(data)
data_frame.to_csv(file_path, index=False)
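# Example call (the table identifier below is hypothetical; any identifier from
# the CBS odata portal works the same way). The result is cached under
# data/downloaded, so repeated calls skip the download:
#
#     download("84583NED")   # writes data/downloaded/84583NED.csv once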
def clean(identifier: str):
"""Clean a dataset by renaming and joining its columns."""
# Prepare download directory
download_directory = Path("data/downloaded")
download_directory.mkdir(parents=True, exist_ok=True)
# Prepare clean directory
clean_directory = Path("data/cleaned")
clean_directory.mkdir(parents=True, exist_ok=True)
# Check if dataset was previously cleaned
target_path = clean_directory / (identifier + ".csv")
if not target_path.exists():
print(f"Cleaning \"{identifier}\" to \"{target_path}\".")
# Clean dataset
source_path = download_directory / (identifier + ".csv")
data_frame = pandas.read_csv(source_path)
# Remove whitespace
for column in data_frame.columns:
if pandas.api.types.is_string_dtype(data_frame[column]):
data_frame[column] = data_frame[column].str.strip()
# Remove and rename columns
columns = {
# General
"SoortRegio_2": "type",
"WijkenEnBuurten": "name",
"Codering_3": "code",
# Price
"GemiddeldeWoningwaarde_35": "house_worth",
# Urbanity
"MateVanStedelijkheid_105": "urbanity",
# Safety
# "AantalInwoners_5": "inhabitants",
# "TotaalDiefstalUitWoningSchuurED_78": "theft",
# "VernielingMisdrijfTegenOpenbareOrde_79": "destruction",
# "GeweldsEnSeksueleMisdrijven_80": "violence",
# Healthcare
"AfstandTotHuisartsenpraktijk_5": "distance_to_general_practitioner",
"AfstandTotHuisartsenpost_9": "distance_to_general_practice",
"AfstandTotZiekenhuis_11": "distance_to_hospital",
# Education
# "AfstandTotSchool_60": "distance_to_school_1",
# "AfstandTotSchool_64": "distance_to_school_2",
# "AfstandTotSchool_68": "distance_to_school_3",
# "AfstandTotSchool_72": "distance_to_school_4",
"AfstandTotSchool_98": "distance_to_school",
# Transit
"AfstandTotBelangrijkOverstapstation_91": "distance_to_public_transport",
# Required facilities
"AfstandTotApotheek_10": "distance_to_pharmacy",
"AfstandTotGroteSupermarkt_24": "distance_to_grocery_store",
"AfstandTotKinderdagverblijf_52": "distance_to_daycare",
"AfstandTotBibliotheek_92": "distance_to_library",
}
columns = {key: columns[key] for key in data_frame.columns if key in columns}
data_frame = data_frame[columns.keys()]
data_frame.rename(columns=columns, inplace=True)
# Rename types
translations = {
"Land": "country",
"Gemeente": "municipality",
"Wijk": "district",
"Buurt": "neighbourhood",
}
data_frame["type"].replace(translations.keys(), translations.values(), inplace=True)
# Store clean dataset
data_frame.to_csv(target_path, index=False)
def join(identifiers: [str]):
"""Join a group of datasets together into a single group,"""
# Prepare clean directory
clean_directory = Path("data/cleaned")
clean_directory.mkdir(parents=True, exist_ok=True)
# Prepare join directory
join_directory = Path("data/joined")
join_directory.mkdir(parents=True, exist_ok=True)
# Check if datasets were previously joined
target_path = join_directory / ("_".join(identifiers) + ".csv")
if not target_path.exists():
identifier_names = ", ".join(map(lambda identifier: f"\"{identifier}\"", identifiers))
print(f"Joining {identifier_names} to \"{target_path}\".")
def load(identifier: str) -> pandas.DataFrame:
"""Load the dataset associated with the given identifier."""
source_path = clean_directory / (identifier + ".csv")
return pandas.read_csv(source_path)
def merge(accumulator: pandas.DataFrame, other: pandas.DataFrame) -> pandas.DataFrame:
"""Merge the given datasets into a single dataset via outer joining on shared columns."""
columns = list(set(accumulator.columns).intersection(set(other.columns)))
return pandas.merge(accumulator, other, how="outer", on=columns)
# Join datasets
data_frame = reduce(merge, map(load, identifiers))
data_frame.to_csv(target_path, index=False)
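# The merge above outer-joins each pair of tables on whatever columns they
# share after cleaning (typically "type", "name" and "code"), so region rows
# line up across CBS tables. A minimal standalone sketch of the same
# reduce/merge pattern on toy frames:
_a = pandas.DataFrame({"code": ["BU1", "BU2"], "x": [1, 2]})
_b = pandas.DataFrame({"code": ["BU1", "BU2"], "y": [3, 4]})
_c = pandas.DataFrame({"code": ["BU2", "BU3"], "z": [5, 6]})
_joined = reduce(
    lambda acc, other: pandas.merge(
        acc, other, how="outer", on=list(set(acc.columns) & set(other.columns))),
    [_a, _b, _c])
# _joined has one row per code BU1..BU3, with NaN where a table had no match.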
def preprocess(identifiers: [str]):
"""Preprocess joined dataset."""
# Prepare join directory
join_directory = Path("data/joined")
join_directory.mkdir(parents=True, exist_ok=True)
# Prepare preprocess directory
preprocess_directory = Path("data/preprocessed")
preprocess_directory.mkdir(parents=True, exist_ok=True)
target_path = preprocess_directory / ("_".join(identifiers) + ".csv")
if not target_path.exists():
# Load dataset
source_path = join_directory / ("_".join(identifiers) + ".csv")
data_frame = pandas.read_csv(source_path)
print(f"Preprocessing \"{source_path}\" to \"{target_path}\".")
# Fill in missing values
region_types = ["country", "municipality", "district", "neighbourhood"]
def fill_top_down(column):
values = list()
for index, row in data_frame.iterrows():
level = region_types.index(row["type"])
values = values[:level]
value = row[column]
if math.isnan(value):
value = values[-1]
data_frame.loc[index, column] = value
values.append(value)
fill_top_down("house_worth")
fill_top_down("urbanity")
fill_top_down("distance_to_general_practitioner")
fill_top_down("distance_to_general_practice")
fill_top_down("distance_to_hospital")
fill_top_down("distance_to_school")
fill_top_down("distance_to_public_transport")
fill_top_down("distance_to_pharmacy")
fill_top_down("distance_to_grocery_store")
fill_top_down("distance_to_daycare")
fill_top_down("distance_to_library")
# Normalize columns
to_be_normalized = [
"house_worth",
"urbanity",
"distance_to_general_practitioner",
"distance_to_general_practice",
"distance_to_hospital",
"distance_to_school",
"distance_to_public_transport",
]
for column in to_be_normalized:
if pandas.api.types.is_numeric_dtype(data_frame[column]):
scaler = MinMaxScaler()
data_frame[[column]] = scaler.fit_transform(data_frame[[column]])
# Combine data columns
# Price
data_frame["price"] = 1.0 - data_frame["house_worth"]
data_frame.drop(columns=["house_worth"], inplace=True)
# Urbanity
data_frame["urbanity"] = 1.0 - data_frame["urbanity"]
# Safety
# data_frame["safety"] = 1.0 / (data_frame["theft"] + data_frame["destruction"] + data_frame["violence"])
# data_frame["safety"].fillna(1)
# data_frame.drop(columns=["theft", "destruction", "violence"], inplace=True)
# Healthcare
healthcare_columns = [
"distance_to_general_practitioner",
"distance_to_general_practice",
"distance_to_hospital"
]
def weighted_minimum(row):
return 1.0 - min(row[0], row[1] ** 2, row[2] ** 3)
data_frame["healthcare"] = data_frame[healthcare_columns].apply(weighted_minimum, axis=1)
data_frame.drop(columns=healthcare_columns, inplace=True)
# Education
data_frame["education"] = 1.0 - data_frame["distance_to_school"]
data_frame.drop(columns=["distance_to_school"], inplace=True)
# Public Transport
data_frame["public_transport"] = 1.0 - data_frame["distance_to_public_transport"]
data_frame.drop(columns=["distance_to_public_transport"], inplace=True)
# Pharmacy
data_frame["pharmacy"] = data_frame["distance_to_pharmacy"] < 5.0
data_frame.drop(columns=["distance_to_pharmacy"], inplace=True)
        # Grocery store
data_frame["grocery_store"] = data_frame["distance_to_grocery_store"] < 5.0
data_frame.drop(columns=["distance_to_grocery_store"], inplace=True)
        # Daycare
data_frame["daycare"] = data_frame["distance_to_daycare"] < 5.0
data_frame.drop(columns=["distance_to_daycare"], inplace=True)
        # Library
data_frame["library"] = data_frame["distance_to_library"] < 5.0
data_frame.drop(columns=["distance_to_library"], inplace=True)
# Distribute by ranking
for column in data_frame.columns:
if pandas.api.types.is_float_dtype(data_frame[column]):
values = sorted(data_frame[column])
latest_value = None
latest_rank = 0
next_rank = 0
max_rank = len(data_frame[column])
ranks = []
for value in values:
                    if latest_value is None or latest_value < value:
                        latest_rank = next_rank
                    ranks.append(latest_rank / max_rank)
                    next_rank += 1
                    # remember the previous value so that tied values share a rank
                    latest_value = value
rank_dict = {value: rank for value, rank in zip(values, ranks)}
data_frame[column] = data_frame[column].map(rank_dict)
# Create histograms for distribution analysis
histogram_directory = Path("data/histograms")
histogram_directory.mkdir(parents=True, exist_ok=True)
for column in data_frame.columns:
if pandas.api.types.is_float_dtype(data_frame[column]):
figure, ax = plt.subplots()
data_frame[column].hist(bins=20, legend=True, ax=ax)
figure.savefig(histogram_directory / (column + ".png"))
# Store clean dataset
data_frame.to_csv(target_path, index=False)
def split(identifiers: [str]):
"""Preprocess joined dataset."""
# Prepare preprocess directory
preprocess_directory = Path("data/preprocessed")
preprocess_directory.mkdir(parents=True, exist_ok=True)
# Prepare join directory
split_directory = Path("data/split")
split_directory.mkdir(parents=True, exist_ok=True)
target_directory = split_directory / "_".join(identifiers)
if not target_directory.exists():
print(f"Splitting datasets to \"{target_directory}\".")
target_directory.mkdir(exist_ok=True, parents=True)
# Load dataset
source_path = preprocess_directory / ("_".join(identifiers) + ".csv")
data_frame = | pandas.read_csv(source_path) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from pandas.testing import assert_series_equal
from src.shared import _create_group_ids
from src.shared import _determine_number_of_groups
from src.shared import _expand_or_contract_ids
from src.shared import create_groups_from_dist
from src.shared import draw_groups
@pytest.fixture
def df():
df = pd.DataFrame(
data={
"age": [15, 25, 30, 70, 20, 25],
"region": ["A", "B", "B", "B", "A", "A"],
},
columns=["age", "region"],
)
return df
def test_draw_groups(df):
res = draw_groups(
df=df,
query="18 <= age <= 65",
assort_bys=["region"],
n_per_group=20,
seed=393,
)
expected = np.array([-1, 1, 1, -1, 0, 0])
assert_array_equal(res.to_numpy(), expected)
def test_determine_number_of_groups():
nobs = 40
dist = pd.Series({1: 0.5, 2: 0.25, 5: 0.25})
expected = pd.Series({1: 20, 2: 5, 5: 2})
res = _determine_number_of_groups(nobs=nobs, dist=dist)
assert_series_equal(res, expected, check_dtype=False)
def test_create_group_ids():
assort_by_vals = ("hello", "world")
nr_of_groups = | pd.Series({1: 20, 2: 5, 5: 2}) | pandas.Series |
#-*- coding:utf-8 -*-
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.naive_bayes import MultinomialNB
df = | pd.read_csv('dataset/raw_training.csv') | pandas.read_csv |
import os
import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
from imblearn.over_sampling import SMOTE
DATA_PATH = '../cell-profiler/measurements'
def load_data(filename, data_path=DATA_PATH):
"""
Read a csv file.
"""
csv_path = os.path.join(data_path, filename)
return pd.read_csv(csv_path)
def save_data(df, filename, data_path=DATA_PATH):
"""
Write into a csv file.
"""
csv_path = os.path.join(data_path, filename)
df.to_csv(csv_path)
def parse_metadata(df):
"""
    Parse metadata to extract information about experimental conditions
and drop unnecessary columns.
"""
# Drop unnecessary columns
columns = ['Metadata_Frame', 'Metadata_Series',
'Metadata_Stiffness.1', 'Metadata_Combination.1',
'Metadata_Well.1', 'Metadata_Site.1'
]
df.drop(columns, axis=1, inplace=True)
# Rename columns containing metadata
df.rename(columns={'ImageNumber' : 'image', 'ObjectNumber' : 'object',
'Metadata_Stiffness' : 'stiffness',
'Metadata_Combination' : 'combination',
'Metadata_Well' : 'well',
'Metadata_Site' : 'site'}, inplace=True)
# Change types and create cell and image labels
df = create_label(df)
df = create_label(df, col_name='image', per_cell=False)
def as_stiff_type(x):
"""
Convert stiffness values to a custom categorical type.
"""
# Create a categorical data type for stiffness
#stiff_type = CategoricalDtype(categories=['0.2', '0.5', '2.0', '8.0', '16.0', '32.0', '64.0'], ordered=True)
stiff_type = CategoricalDtype(categories=['0.2', '2.0', '16.0', '32.0', '64.0'], ordered=True)
return x.astype(stiff_type)
def create_label(df, col_name='label', per_cell=True):
"""
Create a unique label for each observation.
Labels are created as follows:
For each site: stiffness-combination-well-site
For each cell: stiffness-combination-well-site-object
"""
# Convert to non-numeric values
columns = ['image', 'object', 'stiffness', 'combination', 'well', 'site']
existing_columns = [col for col in columns if col in df.columns]
df[existing_columns] = df[existing_columns].astype(str)
df.stiffness = as_stiff_type(df.stiffness)
# Create a unique label for each cell
if per_cell:
df[col_name] = df[['stiffness', 'combination', 'well', 'site', 'object']].apply(lambda x: '-'.join(x), axis=1)
else:
df[col_name] = df[['stiffness', 'combination', 'well', 'site']].apply(lambda x: '-'.join(x), axis=1)
return df
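# Illustrative example (made-up values): a cell on a 2.0 kPa gel, combination
# "B", well "w2", site "s1", object "7" gets the per-cell label below.
_demo = pd.DataFrame({"image": ["1"], "object": ["7"], "stiffness": ["2.0"],
                      "combination": ["B"], "well": ["w2"], "site": ["s1"]})
assert create_label(_demo.copy()).loc[0, "label"] == "2.0-B-w2-s1-7"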
def rename_columns(df):
"""
Rename columns containing features measured by cell profiler.
"""
# Convert to lower case
df.columns = [col.lower() for col in df.columns]
# Rename channels
df.columns = [col.replace('_origdapi', '_dapi', 1) for col in df.columns]
df.columns = [col.replace('_origwga', '_wga', 1) for col in df.columns]
df.columns = [col.replace('_origker', '_ker', 1) for col in df.columns]
df.columns = [col.replace('_origvim', '_vim', 1) for col in df.columns]
df.columns = [col.replace('_origecad', '_ecad', 1) for col in df.columns]
# Coordinates in X
df.columns = [col.replace('_x', 'X', 1) for col in df.columns]
# Coordinates in Y
df.columns = [col.replace('_y', 'Y', 1) for col in df.columns]
# Coordinates in Z
df.columns = [col.replace('_z', 'Z', 1) for col in df.columns]
# Shape features
df.columns = [col.replace('areashape_', '', 1) for col in df.columns]
# Zernike features
df.columns = [col.replace('areashapeZernike', 'zernike', 1) for col in df.columns]
# Intensity features
df.columns = [col.replace('intensity_', '', 1) for col in df.columns]
# Location
df.columns = [col.replace('location_', 'loc_', 1) for col in df.columns]
# Texture
new_names = []
for col in df.columns:
if 'texture_' in col:
new_names.append(col.replace('_3', '', 1))
else:
new_names.append(col)
df.columns = new_names
df.columns = [col.replace('texture_', '', 1) for col in df.columns]
print("The are no duplicated column names:", len(list(df.columns)) == len(set(list(df.columns))))
def merge_datasets(df1, df2, suffixes=[]):
"""
Merge two datasets on a set of metedata columns.
"""
common_cols = ['label', 'image', 'object', 'stiffness', 'combination', 'well', 'site']
if len(suffixes)==2:
return pd.merge(df1, df2, how='outer', on=common_cols, suffixes=suffixes)
elif len(suffixes)==1:
new_names=[]
for col in df2.columns:
if col in common_cols:
new_names.append(col)
else:
new_names.append(col + suffixes[0])
df2.columns = new_names
return pd.merge(df1, df2, how='outer', on=common_cols)
else:
return pd.merge(df1, df2, how='outer', on=common_cols)
def move_column(df, column_name, loc):
"""
Move a columns in front of the dataframe.
"""
columns = df.columns.tolist()
columns.insert(loc, columns.pop(columns.index(column_name)))
return df.reindex(columns=columns, copy=False)
def merge_neighbors(df, df_n):
cols = ['ImageNumber', 'Location_Center_X', 'Location_Center_Y']
# Round centre locations
df_m = df[cols].copy()
loc_cols = ['Location_Center_X', 'Location_Center_Y']
df_m[loc_cols] = df_m[loc_cols].round()
df_n[loc_cols] = df_n[loc_cols].round()
# Merge dataframes
neighb = pd.merge(df_m, df_n, how='inner', on=cols)
assert neighb.shape[0] == df_m.shape[0]
# Assign values to the original dataframe
df.loc[:, 'Neighbors_AngleBetweenNeighbors_3' : 'Number_Object_Number'] = \
neighb.loc[:, 'Neighbors_AngleBetweenNeighbors_3' : 'Number_Object_Number']
# Delete duplicated columns
distances = [col.split('_')[2] for col in neighb.columns if 'Neighbors_NumberOfNeighbors' in col]
if len(distances) > 1:
dupl_cols = ['Neighbors_AngleBetweenNeighbors_',
'Neighbors_FirstClosestDistance_',
'Neighbors_FirstClosestObjectNumber_',
'Neighbors_SecondClosestDistance_',
'Neighbors_SecondClosestObjectNumber_']
dupl_cols = [col + distances[1] for col in dupl_cols]
df.drop(dupl_cols, axis=1, inplace=True)
def import_cell_data(data_path=DATA_PATH, suffix='', cytoplasm=False, biomarkers=False, neighbours=False):
"""
Import all the data and then
call functions to parse metadata,
rename and rearrange columns and merge datasets.
"""
cells = load_data(filename=suffix + 'Cells.csv')
nuclei = load_data(filename=suffix + 'Nuclei.csv')
info = load_data(filename=suffix + 'Image.csv')
# Check that dataframes contain the correct number of cells
n_cells = info.Count_Cells.sum()
print('Morphology was measured for {} cells.\n'.format(n_cells))
if (cells.shape[0] == n_cells) and (nuclei.shape[0] == n_cells):
print('The numbers of cells and nuclei correspond to each other.\n')
else:
print('Found {} cells and {} nuclei'.format(cells.shape[0], nuclei.shape[0]))
if neighbours:
neighbors = load_data(filename=suffix + 'Neighbours.csv')
# Merge neighbours
merge_neighbors(cells, neighbors)
# Parse and clean metadata
parse_metadata(cells)
parse_metadata(nuclei)
# Rename columns
rename_columns(cells)
rename_columns(nuclei)
# Merge two datasets
measurements = merge_datasets(cells, nuclei, suffixes=['_cell', '_nucl'])
if cytoplasm == True:
cytoplasm = load_data(filename=suffix + 'Cytoplasm.csv')
print('Cytoplasm measurements were taken for {} cells.\n'.format(cytoplasm.shape[0]))
# Parse and clean metadata
parse_metadata(cytoplasm)
# Rename columns
rename_columns(cytoplasm)
# Merge with the main dataset
measurements = merge_datasets(measurements, cytoplasm, suffixes=['_cyto'])
###
#measurements = measurements.loc[measurements.combination == "B"]
###
if biomarkers:
print('Reading the dataset with E-cadherin...')
df = load_data(filename='Ecadherin_Cells.csv')
print('E-cadherin was measured for {} cells.\n'.format(df.shape[0]))
# Parse and clean metadata
parse_metadata(df)
# Rename columns
rename_columns(df)
# Merge with the main dataset
measurements = merge_datasets(measurements, df)
print('Reading the dataset with Vimentin and Cytokeratins...')
df = load_data(filename='Biomarkers_Cells.csv')
print('Vimentin and Cytokeratins were measured for {} cells.\n'.format(df.shape[0]))
# Parse and clean metadata
parse_metadata(df)
# Rename columns
rename_columns(df)
# Merge with the main dataset
measurements = merge_datasets(measurements, df)
# Move "label" column to front
measurements = move_column(measurements, 'label', 0)
print("\nFull dataset has shape:", measurements.shape)
return measurements
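# Typical call (the file-name prefix is hypothetical and depends on how the
# CellProfiler pipeline names its exports):
#
#     measurements = import_cell_data(suffix="MyExpt_", cytoplasm=True)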
def split_dataset(df, channels=None):
"""
Split a dataset in two by channel name.
"""
subsets = []
for selected in channels:
subset = df.copy()
# Channels except for the selected
channels_to_drop = list(set(channels) - set([selected]))
for channel in channels_to_drop:
# Drop the columns with specified channel
cols = [col for col in df.columns if channel in col]
subset.drop(cols, axis=1, inplace=True)
# Rename channel suffix in the new dataset
subset.columns = [col.replace('_' + selected, '', 1) for col in subset.columns]
subsets.append(subset)
return subsets
def dist(df):
"""
Calculate Euclidean distance on a dataframe.
    Input columns are arranged as x0, x1, y0, y1: the first two columns hold
    the first point's (x, y) and the last two the second point's (x, y).
"""
return np.sqrt((df.iloc[:,0] - df.iloc[:,2])**2 + (df.iloc[:,1] - df.iloc[:,3])**2)
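# Quick illustration of the column convention above: the distance between the
# points (0, 0) and (3, 4) is 5.
_pts = pd.DataFrame({"x0": [0.0], "x1": [0.0], "y0": [3.0], "y1": [4.0]})
assert dist(_pts).iloc[0] == 5.0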
def transform_location(df):
"""
Calculate the following distances and drop columns with location measurements.
    * centerX, centerY are coordinates of the farthest point from any edge
(calculated using scipy.ndimage.center_of_mass);
* loc_centerX, loc_centerY are average coordinates for the binary image
(calculated using scipy.ndimage.mean);
* fartherstpoint is a distance between the two points
relative to cell and nuclear boundaries, respectively;
* loc_maxintensityX, loc_maxintensityY are coordinates of the pixel
with the maximum intensity within the object;
* maxintdisplacement is a distance between this pixel and loc_center;
    * nucleusshift is the distance between the centres of mass of a cell and its nucleus.
"""
# Drop duplicate columns with location
df.drop(['loc_centerX', 'loc_centerY'], axis=1, inplace=True, errors='ignore')
# Drop "centermassintensity" columns
drop_cols = [col for col in df.columns if 'centermassintensity' in col]
df.drop(drop_cols, axis=1, inplace=True)
# Calculate distances between centres of a binary image
df['fartherstpoint_cell'] = dist(df.loc[:, ['centerX_cell',
'centerY_cell',
'loc_centerX_cell',
'loc_centerY_cell']])
df['fartherstpoint_nucl'] = dist(df.loc[:, ['centerX_nucl',
'centerY_nucl',
'loc_centerX_nucl',
'loc_centerY_nucl']])
df['nucleusshift'] = dist(df.loc[:, ['centerX_cell',
'centerY_cell',
'centerX_nucl',
'centerY_nucl']])
# Calculate max intensity displacement
suffix = ['_'.join(col.split('_')[2:]) for col in df.columns if 'loc_maxintensity' in col]
for s in set(suffix):
maxint_cols = [col for col in df.columns if 'loc_maxintensity' in col and s in col]
if 'dapi' in s or 'nucl' in s:
cols = ['loc_centerX_nucl','loc_centerY_nucl']
cols.extend(maxint_cols)
else:
cols = ['loc_centerX_cell','loc_centerY_cell']
cols.extend(maxint_cols)
new_col = 'maxintdisplacement_' + s
df[new_col] = dist(df.loc[:, cols])
# All location measurements are in absolute coordinates and should be dropped
drop_cols = [col for col in df.columns if 'loc' in col]
df.drop(drop_cols, axis=1, inplace=True)
drop_cols = [col for col in df.columns if 'center' in col]
df.drop(drop_cols, axis=1, inplace=True)
# Move mass displacement columns to the end
mass_cols = [col for col in df.columns if 'mass' in col]
for col in mass_cols:
df = move_column(df, col, df.columns.size)
return df
def clean_data(df):
"""
Clean the dataframe by dropping variable with zero variance,
uninformative/duplicated columns and location measurements.
"""
# Check if there are any missing values
#assert df.isnull().sum().sum() == 0
print("Initial shape is:", df.shape)
# Calculate summary statistics and drop features with zero variance
stats = df.describe()
zerovar_cols = stats.columns[stats.loc['std', :] == 0]
print("Features with zero variance:\n", zerovar_cols)
df.drop(zerovar_cols, axis=1, inplace=True)
# Drop columns with object numbers
numbers_cols = [col for col in df.columns if 'object' in col and 'number' in col]
df.drop(numbers_cols, axis=1, inplace=True)
# Drop columns with parent numbers
parent_cols = [col for col in df.columns if 'parent' in col or 'children' in col]
df.drop(parent_cols, axis=1, inplace=True)
# Transform location measurements
df = transform_location(df)
# Transform orientation angle from [-pi/2, pi/2] to [0, pi]
angle_cols = [col for col in df.columns if 'orientation' in col]
df[angle_cols] += 90
print("\nAfter cleaning the dataset has {} rows and {} columns.\n".format(df.shape[0], df.shape[1]))
return df
def select_features(df, filename='selected_columns.txt', cols='geom'):
"""
Load the list of manually selected columns
and return a copy of the dataset containing only
those columns
"""
with open(filename, 'r') as file:
selected_cols = [line.rstrip('\n') for line in file]
if cols == 'geom':
geom_selected_cols = [col for col in selected_cols if 'dapi' not in col and 'wga' not in col]
return df[geom_selected_cols]
return pd.concat([df.loc[:, 'label' : 'well'], df[selected_cols]], axis=1)
def undersample(df, n_samples):
"""
Perform undersampling of majority classes
by randomly selecting n_samples cells
for each stiffness level.
"""
df_under = pd.DataFrame(columns=df.columns)
for s in df.stiffness.unique():
if (s == "8.0") or (s == "32.0") :
df_under = pd.concat([df_under, df[df.stiffness == s]], axis=0)
else:
df_under = pd.concat([df_under, df[df.stiffness == s].sample(n_samples)], axis=0)
print("Undersampling. The balanced dataset has shape", df_under.shape)
return df_under.reset_index(drop=True)
def smote(X, y, as_df=True):
"""
Synthesise new observations to have equal
number of cells for each stiffness value.
"""
smote = SMOTE()
X_sm, y_sm = smote.fit_sample(X, y)
print("\nAfter synthesing new observations the balanced dataset has {} rows and {} columns.\n"
.format(X_sm.shape[0], X_sm.shape[1]))
if as_df:
df_smote = pd.concat([pd.DataFrame(X_sm, columns=X.columns),
as_stiff_type( | pd.DataFrame(y_sm, columns=['stiffness']) | pandas.DataFrame |
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_ftypes(self, mixed_float_frame):
frame = mixed_float_frame
expected = Series(
dict(
A="float32:dense",
B="float32:dense",
C="float16:dense",
D="float64:dense",
)
).sort_values()
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
result = frame.ftypes.sort_values()
tm.assert_series_equal(result, expected)
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
casted = float_frame.astype(np.int32)
expected = DataFrame(
float_frame.values.astype(np.int32),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
float_frame["foo"] = "5"
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
def test_astype_mixed_float(self, mixed_float_frame):
# mixed casting
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float32")
_check_cast(casted, "float32")
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float16")
_check_cast(casted, "float16")
def test_astype_mixed_type(self, mixed_type_frame):
# mixed casting
mn = mixed_type_frame._get_numeric_data().copy()
mn["little_float"] = np.array(12345.0, dtype="float16")
mn["big_float"] = np.array(123456789101112.0, dtype="float64")
casted = mn.astype("float64")
_check_cast(casted, "float64")
casted = mn.astype("int64")
_check_cast(casted, "int64")
casted = mn.reindex(columns=["little_float"]).astype("float16")
_check_cast(casted, "float16")
casted = mn.astype("float32")
_check_cast(casted, "float32")
casted = mn.astype("int32")
_check_cast(casted, "int32")
# to object
casted = mn.astype("O")
_check_cast(casted, "object")
def test_astype_with_exclude_string(self, float_frame):
df = float_frame.copy()
expected = float_frame.astype(int)
df["string"] = "foo"
casted = df.astype(int, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
df = float_frame.copy()
expected = float_frame.astype(np.int32)
df["string"] = "foo"
casted = df.astype(np.int32, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
def test_astype_with_view_float(self, float_frame):
# this is the only real reason to do it this way
tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
def test_astype_with_view_mixed_float(self, mixed_float_frame):
tf = mixed_float_frame.reindex(columns=["A", "B", "C"])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with pytest.raises(ValueError, match=msg):
df.astype(dtype)
def test_astype_str(self):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
result = df.astype(str)
expected = DataFrame(
{
"a": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(str, map(Timestamp, b._values))),
"c": list(
map(
str,
map(lambda x: Timedelta(x)._repr_base(format="all"), c._values),
)
),
"d": list(map(str, d._values)),
"e": list(map(str, e._values)),
}
)
tm.assert_frame_equal(result, expected)
def test_astype_str_float(self):
# see gh-11302
result = DataFrame([np.NaN]).astype(str)
expected = DataFrame(["nan"])
tm.assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(str)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = "1.12345678901" if _np_version_under1p14 else "1.1234567890123457"
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# GH7271 & GH16717
a = Series(date_range("2010-01-04", periods=5))
b = Series(range(5))
c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
d = Series(["1.0", "2", "3.14", "4", "5.4"])
df = DataFrame({"a": a, "b": b, "c": c, "d": d})
original = df.copy(deep=True)
# change type of a subset of columns
dt1 = dtype_class({"b": "str", "d": "float32"})
result = df.astype(dt1)
expected = DataFrame(
{
"a": a,
"b": Series(["0", "1", "2", "3", "4"]),
"c": c,
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float32"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
dt2 = dtype_class({"b": np.float32, "c": "float32", "d": np.float64})
result = df.astype(dt2)
expected = DataFrame(
{
"a": a,
"b": Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32"),
"c": Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype="float32"),
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float64"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
# change all columns
dt3 = dtype_class({"a": str, "b": str, "c": str, "d": str})
tm.assert_frame_equal(df.astype(dt3), df.astype(str))
tm.assert_frame_equal(df, original)
# error should be raised when using something other than column labels
# in the keys of the dtype dict
dt4 = dtype_class({"b": str, 2: str})
dt5 = dtype_class({"e": str})
msg = "Only a column name can be used for the key in a dtype mappings argument"
with pytest.raises(KeyError, match=msg):
df.astype(dt4)
with pytest.raises(KeyError, match=msg):
df.astype(dt5)
tm.assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
# resulting DataFrame should be the same as the original DataFrame
dt6 = dtype_class({col: df[col].dtype for col in df.columns})
equiv = df.astype(dt6)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
# GH 16717
# if dtypes provided is empty, the resulting DataFrame
# should be the same as the original DataFrame
dt7 = dtype_class({})
result = df.astype(dt7)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
a1 = Series([1, 2, 3, 4, 5], name="a")
b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name="b")
a2 = Series([0, 1, 2, 3, 4], name="a")
df = concat([a1, b, a2], axis=1)
result = df.astype(str)
a1_str = Series(["1", "2", "3", "4", "5"], dtype="str", name="a")
b_str = Series(["0.1", "0.2", "0.4", "0.6", "0.8"], dtype=str, name="b")
a2_str = Series(["0", "1", "2", "3", "4"], dtype="str", name="a")
expected = concat([a1_str, b_str, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
result = df.astype({"a": "str"})
expected = concat([a1_str, b, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
CategoricalDtype(ordered=True),
CategoricalDtype(ordered=False),
CategoricalDtype(categories=list("abcdef")),
CategoricalDtype(categories=list("edba"), ordered=False),
CategoricalDtype(categories=list("edcb"), ordered=True),
],
ids=repr,
)
def test_astype_categorical(self, dtype):
# GH 18099
d = {"A": list("abbc"), "B": list("bccd"), "C": list("cdde")}
df = DataFrame(d)
result = df.astype(dtype)
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cls",
[
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
pd.api.types.IntervalDtype,
],
)
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ["a", "a", "b", "c"]})
xpr = "Expected an instance of {}".format(cls.__name__)
with pytest.raises(TypeError, match=xpr):
df.astype({"A": cls})
with pytest.raises(TypeError, match=xpr):
df["A"].astype(cls)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes(self, dtype):
# GH 22578
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
expected1 = pd.DataFrame(
{
"a": integer_array([1, 3, 5], dtype=dtype),
"b": integer_array([2, 4, 6], dtype=dtype),
}
)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
tm.assert_frame_equal(df.astype(dtype).astype("float64"), df)
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
df["b"] = df["b"].astype(dtype)
expected2 = pd.DataFrame(
{"a": [1.0, 3.0, 5.0], "b": integer_array([2, 4, 6], dtype=dtype)}
)
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes_1d(self, dtype):
# GH 22578
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
expected1 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
df["a"] = df["a"].astype(dtype)
expected2 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["category", "Int64"])
def test_astype_extension_dtypes_duplicate_col(self, dtype):
# GH 24704
a1 = Series([0, np.nan, 4], name="a")
a2 = Series([np.nan, 3, 5], name="a")
df = concat([a1, a2], axis=1)
result = df.astype(dtype)
expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [{100: "float64", 200: "uint64"}, "category", "float64"]
)
def test_astype_column_metadata(self, dtype):
# GH 19920
columns = pd.UInt64Index([100, 200, 300], name="foo")
df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)
df = df.astype(dtype)
tm.assert_index_equal(df.columns, columns)
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
    def test_astype_from_datetimelike_to_object(self, dtype, unit):
# tests astype to object dtype
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(object)
assert (result.dtypes == object).all()
if dtype.startswith("M8"):
assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit)
else:
assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units from numeric origination
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=arr_dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetime_unit(self, unit):
# tests all units from datetime origination
# gh-19223
dtype = "M8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns"])
def test_astype_to_timedelta_unit_ns(self, unit):
        # preserve the timedelta conversion
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
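    # Sketch of how the helper above is typically exercised (mirrors pandas'
    # multi-threaded read_csv tests; names below are illustrative): write a
    # frame once, read it back in row-range chunks across a thread pool, and
    # compare against the original.
    #
    #     df = self.construct_dataframe(num_rows=10000)
    #     with tm.ensure_clean('__threads__.csv') as path:
    #         df.to_csv(path)
    #         final = self.generate_multithread_dataframe(path, 10000, 4)
    #         tm.assert_frame_equal(df, final)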
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
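# parse_dates maps each new column name to the positional source columns
# that are concatenated and fed to date_parser; note that column 1 is
# reused by both 'nominal' and 'actual'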
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format the written value no longer matches the
# string '-999.0' exactly, so float matching is needed
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
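# f(i, v) builds a CSV row of nv fields in which only field i contains the
# NA token v, so every default token is checked in every column position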
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
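# skiprows uses 0-based line numbers of the raw file (the header is line 0),
# so [6, 8] drops the rows generated from i=5 and i=7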
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO: add assertions for the mixed-dtype result
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1: the blank index-name row is missing, so the first
# header pair ('a', 'q') is consumed as the index name
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2: the first header cell is blank, so only the second
# level gets a name (names=[None, 'q'])
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# MultiIndex on both the columns and the index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header=1 then selects the second remaining non-comment line ('A,B,C')
# as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat artificial, since the parser never sees raw bytes here
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
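# convert_days_sentinel mirrors convert_days; the two runs below are
# expected to produce equal frames when combined with na_values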
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_emtpy_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
        # stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
        # File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf( | StringIO(test) | pandas.compat.StringIO |
# import Ipynb_importer
import pandas as pd
from .public_fun import *
# Global variables
class glv:
def _init():
global _global_dict
_global_dict = {}
def set_value(key,value):
_global_dict[key] = value
def get_value(key,defValue=None):
try:
return _global_dict[key]
except KeyError:
return defValue
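# Editor's note: a minimal usage sketch for the registry above (illustrative only;
# the keys shown here are assumptions, not part of the original module):
#   glv._init()                      # must be called once before set/get
#   glv.set_value('data_f', '2302')  # store a value under a key
#   glv.get_value('data_f')          # -> '2302'
#   glv.get_value('missing', 0)      # -> 0, the supplied default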
## fun_01to06
class fun_01to06(object):
def __init__(self, data):
self.cf = [2, 1, 1, 17, 1, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"起始符",
"命令标识",
"应答标志",
"唯一识别码",
"数据单元加密方式",
"数据单元长度"
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"起始符":hex2str(self.oj["起始符"]),
"命令标识":dict_list_replace('02', self.oj['命令标识']),
"应答标志":dict_list_replace('03', self.oj['应答标志']),
"唯一识别码":hex2str(self.oj["唯一识别码"]),
"数据单元加密方式":dict_list_replace('05', self.oj['数据单元加密方式']),
"数据单元长度":hex2dec(self.oj["数据单元长度"]),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
self.mo = self.oj["命令标识"]
glv.set_value('data_f', self.next)
glv.set_value('data_mo', self.mo)
glv.set_value('data_01to07', self.o)
print('fun_01to06 done!')
## fun_07
class fun_07:
def __init__(self, data):
self.mo = glv.get_value("data_mo")
if self.mo == '01':
self.o = fun_07_01(glv.get_value('data_f'))
elif self.mo == '02' or self.mo == '03':
self.o = fun_07_02(glv.get_value('data_f'))
elif self.mo == '04':
self.o = fun_07_04(glv.get_value('data_f'))
elif self.mo == '05':
self.o = fun_07_05(glv.get_value('data_f'))
elif self.mo == '06':
self.o = fun_07_06(glv.get_value('data_f'))
else :
print('命令标识:',self.mo,'有误')
self.c = fun_07_cursor(glv.get_value('data_f'))
self.oj = dict(self.o.oj, **self.c.oj)
self.oj2 = {'数据单元':self.oj}
self.ol = | pd.merge(self.o.ol, self.c.ol, left_index=True, right_index=True) | pandas.merge |
import pandas as pd
import pymmwr as pm
import datetime
import warnings
import io
import requests
warnings.simplefilter(action='ignore')
def read_fips_codes(filepath):
# read file
fips_codes = pd.read_csv(filepath)
# take state code from all fips codes
fips_codes['state_abbr'] = fips_codes['location'].str[:2]
    # match state abbreviation with state fips code
fips_codes['state_abbr'] = fips_codes['state_abbr'].apply(lambda x: fips_codes[fips_codes.location ==x].abbreviation.tolist()[0] if str(x) in fips_codes['location'].tolist() else 'NA')
# only output "location (fips code)","location_name","(state) abbreviation"
fips_codes = fips_codes.drop('abbreviation',axis=1)
fips_codes.rename({'state_abbr': 'abbreviation'}, axis=1, inplace=True)
return fips_codes
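# Illustrative call (editor's note; the file name is an assumption): the input CSV is
# expected to provide 'location', 'abbreviation' and 'location_name' columns, and the
# function returns the same rows with each location's state-level abbreviation attached.
#   fips = read_fips_codes('data-locations/locations.csv')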
def get_epi_data(date):
# The format
format_str = '%m/%d/%y'
dt = datetime.datetime.strptime(date, format_str).date()
epi = pm.date_to_epiweek(dt)
return epi.year, epi.week, epi.day
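# Example (editor's note, illustrative): get_epi_data('05/11/13') returns the MMWR
# (year, week, day) triple for 2013-05-11 as computed by pymmwr, where weeks start
# on Sunday according to the MMWR convention.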
def pre_process (df):
# convert matrix to repeating row format
df_truth = df.unstack()
df_truth = df_truth.reset_index()
# get epi data from date
df_truth['year'], df_truth['week'], df_truth['day'] = \
zip(*df_truth['level_0'].map(get_epi_data))
return df_truth
def get_byday (df_truth):
# only output "location", "epiweek", "value"
df_truth = df_truth.drop(['location_long'], axis=1)
df_byday = df_truth.rename(columns={"level_0": "date"})
# select columns
df_byday = df_byday[["date", "location", "location_name", "value"]]
# ensure value column is integer
df_byday['value'] = df_byday['value'].astype(int)
# change to yyyy/mm/dd format
df_byday['date'] = | pd.to_datetime(df_byday['date']) | pandas.to_datetime |
import numpy as np
from numpy.random import randn
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
@pytest.mark.parametrize("name", ["var", "vol", "mean"])
def test_ewma_series(series, name):
series_result = getattr(series.ewm(com=10), name)()
assert isinstance(series_result, Series)
@pytest.mark.parametrize("name", ["var", "vol", "mean"])
def test_ewma_frame(frame, name):
frame_result = getattr(frame.ewm(com=10), name)()
assert isinstance(frame_result, DataFrame)
def test_ewma_adjust():
vals = Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize("adjust", [True, False])
@pytest.mark.parametrize("ignore_na", [True, False])
def test_ewma_cases(adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
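# Worked reference for the expected values above (editor's note, not part of the
# original test): com=2.0 gives alpha = 1 / (1 + com) = 1/3.
# adjust=True normalises by the sum of weights, e.g. the second value is
#   (2 + (2/3) * 1) / (1 + 2/3) = 1.6
# adjust=False uses the recursion y_t = (1 - alpha) * y_{t-1} + alpha * x_t, e.g.
#   (2/3) * 1 + (1/3) * 2 = 1.333333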
def test_ewma_nan_handling():
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
@pytest.mark.parametrize(
"s, adjust, ignore_na, w",
[
(
Series([np.nan, 1.0, 101.0]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
False,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, 101.0]),
False,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
True,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
True,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
False,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
False,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
False,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
True,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0))
* ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))),
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0)),
],
),
],
)
def test_ewma_nan_handling_cases(s, adjust, ignore_na, w):
# GH 7603
expected = (s.multiply(w).cumsum() / Series(w).cumsum()).fillna(method="ffill")
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=2.0, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
def test_ewma_span_com_args(series):
A = series.ewm(com=9.5).mean()
B = series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
series.ewm(com=9.5, span=20)
msg = "Must pass one of comass, span, halflife, or alpha"
with pytest.raises(ValueError, match=msg):
series.ewm().mean()
def test_ewma_halflife_arg(series):
A = series.ewm(com=13.932726172912965).mean()
B = series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
series.ewm(span=20, halflife=50)
with pytest.raises(ValueError):
series.ewm(com=9.5, halflife=50)
with pytest.raises(ValueError):
series.ewm(com=9.5, span=20, halflife=50)
with pytest.raises(ValueError):
series.ewm()
def test_ewm_alpha(arr):
# GH 10789
s = Series(arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
| tm.assert_series_equal(a, c) | pandas._testing.assert_series_equal |
import os
import json
import datetime
import multiprocessing
import random
import copy
import time
import warnings
import pandas as pd
import numpy as np
from datetime import date, timedelta
from pathlib import Path
from models.common.mit_buildings import MITBuildings
from models.common.to_precision import sig_fig_formatted
from analyses.common.analysis import Analysis, add_empty_buildings, sort_dict
def read_row(pair_person_dates):
'''
    builds the per-person stay, arrival and departure DataFrames for one sampled person
    and returns them in a dict. the outputs of many such calls are combined by a
    multiprocessing pool into the full pandas DataFrames, which speeds up converting
    the json samples into tabular form.
    input is a list [person, dates, id] where:
    person: list with one entry per simulated day, holding that person's trajectory samples
        (i.e. one element of input_samples['trajectory']['samples'])
    dates: list of size <num. of days n_days_to_simulate> of dates copied from the samples
        (i.e. input_samples['trajectory']['dates'])
    id: integer index identifying the sampled person
'''
building_stays_local = pd.DataFrame()
coming_into_campus_local = pd.DataFrame()
leaving_campus_local = pd.DataFrame()
person = pair_person_dates[0]
dates = pair_person_dates[1]
id = pair_person_dates[2]
for i, day_sample in enumerate(person):
if len(day_sample) == 0:
continue # sampled person did not go to work
else:
for stay in day_sample:
if stay['stay_type'] == 'on_campus_inside':
building_stays_local = building_stays_local.append(
{
'person_id': id,
'building': stay['building'],
'date': dates[i],
'start_time': dates[i] + ' ' + stay['start_time'],
'end_time': dates[i] + ' ' + stay['end_time']
}, ignore_index=True)
# getting arrival time into campus to calculate campus level inflow down the line
elif stay['stay_type'] == 'commute' and 'arrival_time' in stay.keys():
coming_into_campus_local = coming_into_campus_local.append(
{
'person_id': id,
'commute_type': stay['commute_type'],
'date': dates[i],
'arrival_time': dates[i] + ' ' + stay['arrival_time']
}, ignore_index=True)
                # getting departure time from campus to calculate campus level outflow down the line
elif stay['stay_type'] == 'commute' and 'departure_time' in stay.keys():
leaving_campus_local = leaving_campus_local.append(
{
'person_id': id,
'commute_type': stay['commute_type'],
'date': dates[i],
'departure_time': dates[i] + ' ' + stay['departure_time']
}, ignore_index=True)
return {'building_stays_local': building_stays_local,
'coming_into_campus_local': coming_into_campus_local,
'leaving_campus_local': leaving_campus_local
}
class ScenarioCampusStatistics(Analysis):
def run(self, input_samples: dict, input_analyses: dict, uuid_prefix: str) -> dict:
start_global = time.time()
np.random.seed(self.analysis_parameters['random_seed'])
if 'unittest_mode' not in self.analysis_parameters:
self.unittest_mode = False
else:
self.unittest_mode = self.analysis_parameters['unittest_mode']
self.n_bootstraps = self.analysis_parameters['n_bootstraps']
self.percentiles_list = self.analysis_parameters['percentiles']
dates = input_samples['trajectory']['dates']
all_person_samples = input_samples['trajectory']['samples']
self.num_samples = len(all_person_samples)
self.building_data = MITBuildings()
self.total_population_size = input_samples['trajectory']['total_population_size']
cpu_count = multiprocessing.cpu_count()
print('cpu count on machine:', cpu_count)
dates_copies = [dates for copies in range(len(all_person_samples))]
person_id_list = list(range(self.num_samples))
input_to_multiprocessing = list(zip(all_person_samples, dates_copies, person_id_list))
# read_row(input_to_multiprocessing[0]) #DEBUG for multiproc
start_reading = time.time()
print('creating reading multiprocessing pool..', cpu_count)
pool = multiprocessing.Pool(cpu_count)
print('reading..')
reading_stays_list = pool.map(read_row, input_to_multiprocessing)
pool.close()
pool.join()
print('reading finished, closed pool, took', time.time() - start_reading, 'sec')
building_stays = pd.concat([ps['building_stays_local'] for ps in reading_stays_list])
campus_arrivals = pd.concat([ps['coming_into_campus_local'] for ps in reading_stays_list])
campus_depatures = pd.concat([ps['leaving_campus_local'] for ps in reading_stays_list])
# print(building_stays.head()) #DEBUG
# print(building_stays.shape) #DEBUG
del input_to_multiprocessing
print('converting timestamps')
building_stays['date'] = pd.to_datetime(building_stays['date'])
building_stays['start_time'] = pd.to_datetime(building_stays['start_time'])
building_stays['end_time'] = pd.to_datetime(building_stays['end_time'])
self.building_stays = building_stays
campus_arrivals['date'] = pd.to_datetime(campus_arrivals['date'])
campus_arrivals['arrival_time'] = pd.to_datetime(campus_arrivals['arrival_time'])
self.campus_arrivals = campus_arrivals
campus_depatures['date'] = pd.to_datetime(campus_depatures['date'])
campus_depatures['departure_time'] = pd.to_datetime(campus_depatures['departure_time'])
self.campus_depatures = campus_depatures
# important normalizing factor
self.stays_per_person = self.building_stays.shape[0] / self.num_samples
        # arrivals and departures can (should) normally be > 1 because a person has several arrivals and departures over
# different days
self.arrivals_per_person = self.campus_arrivals.shape[0] / self.num_samples
self.departures_per_person = self.campus_depatures.shape[0] / self.num_samples
self.num_unique_people_in_all_samples = len(pd.unique(building_stays['person_id']))
min_time = min(campus_arrivals['arrival_time'])
min_time = min_time.replace(second=0, microsecond=0, minute=0, hour=min_time.hour)
        # max(campus_depatures['departure_time']) to make sure we get the last time interval, adding + 1 to make sure
# rounding doesn't make us miss the last interval
max_time = max(campus_depatures['departure_time'])
if max_time.hour == 23:
max_time = max_time.replace(second=59, microsecond=0, minute=59, hour=max_time.hour)
else:
max_time = max_time.replace(second=0, microsecond=0, minute=0, hour=max_time.hour + 1)
time_index_series = pd.date_range(
min_time,
max_time,
freq='60min')
self.time_demarkations = time_index_series.to_series().tolist()
######################## BOOTSTRAP ############
# self.bootstrap(7) # DEBUG
start_boostrap = time.time()
all_boostrap_data = pd.DataFrame()
pool = multiprocessing.Pool(cpu_count)
print('created bootstrap multiprocessing pool..', cpu_count)
all_boostrap_data_list = pool.map(self.bootstrap, list(range(self.n_bootstraps)))
pool.close()
pool.join()
# daily
self.all_boostrap_campus_daily_sample_occupancy = pd.concat(
[bs['daily_sample_occupancy'] for bs in all_boostrap_data_list])
self.all_boostrap_campus_daily_sample_inflow = pd.concat(
[bs['daily_sample_inflow'] for bs in all_boostrap_data_list])
self.all_boostrap_campus_daily_sample_outflow = pd.concat(
[bs['daily_sample_outflow'] for bs in all_boostrap_data_list])
# hourly
self.all_boostrap_inflow_data_campus_level_hourly = pd.concat(
[bs['hourly_sample_inflow'] for bs in all_boostrap_data_list])
self.all_boostrap_outflow_data_campus_level_hourly = pd.concat(
[bs['hourly_sample_outflow'] for bs in all_boostrap_data_list])
self.all_boostrap_occupancy_data_campus_level_hourly = pd.concat(
[bs['hourly_sample_occupancy'] for bs in all_boostrap_data_list])
print('bootstrapped finished, closed pool, took:', time.time() - start_boostrap, 'sec')
# print('>>', self.all_boostrap_agg.head()) #DEBUG
######################## CREATING METRICS ############
self.statistics_dict = {}
self.day_list = | pd.unique(self.all_boostrap_campus_daily_sample_occupancy['date']) | pandas.unique |
# last edited: 04/10/2021
#
# The functions pca_initial, pca_initial_, pca_final, and pca_final_ are adapted
# from a post by <NAME> here:
# https://nirpyresearch.com/classification-nir-spectra-principal-component-analysis-python/
#
# Retrieved in December 2020 and is licensed under Creative Commons Attribution 4.0
# International License. (https://creativecommons.org/licenses/by/4.0/)
#
#
# The function cluster_variance is adapted from a post by <NAME> here:
# https://medium.com/analytics-vidhya/choosing-the-best-k-value-for-k-means-clustering-d8b4616f8b86
#
# Retrieved in December 2020.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA as sk_pca
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from matplotlib import colors as c
def pca_initial(data): # Initial PCA function
# Read the features
feat = (data.values[:, 3:]).astype('float32')
# Initialise
skpca1 = sk_pca(n_components=30)
# Scale the features to have zero mean and standard deviation of 1
# This is important when correlating data with very different variances
nfeat1 = StandardScaler().fit_transform(feat)
# Fit the spectral data and extract the explained variance ratio
X1 = skpca1.fit(nfeat1)
expl_var_1 = X1.explained_variance_ratio_
# create scree plot
fig = plt.figure(dpi=100)
plt.bar(range(30), expl_var_1, label="Explained Variance %", color='blue', figure=fig)
plt.xticks(np.arange(len(expl_var_1)), np.arange(1, len(expl_var_1) + 1))
plt.plot(np.cumsum(expl_var_1), '-o', label='Cumulative variance %', color='green', figure=fig)
plt.xlabel('PC Number')
plt.legend()
return fig
def pca_initial_gui(data): # Initial PCA function (no standardscaler)
feat = (data.values[:, 3:]).astype('float64')
ncom = 30
# Initialise
skpca1 = sk_pca(n_components=ncom)
    # Scaling to zero mean and unit standard deviation is skipped in this variant
    # (the StandardScaler call is deliberately left commented out):
    # nfeat1 = StandardScaler().fit_transform(feat)
# Fit the spectral data and extract the explained variance ratio
X1 = skpca1.fit(feat)
expl_var_1 = X1.explained_variance_ratio_
# create scree plot
fig = plt.figure(dpi=100, figsize=(10,5))
plt.bar(range(30), expl_var_1*100, label="Explained Variance %", color='blue', figure=fig)
plt.xticks(np.arange(len(expl_var_1)), np.arange(1, len(expl_var_1) + 1))
plt.plot(np.cumsum(expl_var_1)*100, '-o', label='Cumulative variance %', color='green', figure=fig)
plt.xlabel('PC Number')
plt.ylabel('Explained Variance (%)')
plt.legend()
return fig
def pca_final(data, ncomp): # PCA fitting with scores as result
# Read the features
feat = (data.values[:, 3:]).astype('float32')
    # Scale the features to have zero mean and standard deviation of 1
# This is important when correlating data with very different variances
nfeat1 = StandardScaler().fit_transform(feat)
skpca1 = sk_pca(n_components=ncomp)
# Transform on the scaled features
Xt1 = skpca1.fit_transform(nfeat1)
scores = pd.DataFrame(Xt1)
return scores
def pca_final_gui(data, ncomp): # PCA fitting with scores as result (no standardscaler)
# Read the features
feat = (data.values[:, 3:]).astype('float32')
    # No feature scaling is applied in this variant; the PCA is fitted on the raw features
skpca1 = sk_pca(n_components=ncomp)
# Transform on the scaled features
Xt1 = skpca1.fit_transform(feat)
scores = pd.DataFrame(Xt1)
return scores
def cluster_variance(data):
n = 10
variances = []
kmeans = []
K = list(range(1, n + 1))
for i in range(1, n + 1):
model = KMeans(n_clusters=i, random_state=82, verbose=0).fit(data)
kmeans.append(model)
variances.append(model.inertia_)
# variances,K,n=cluster_variance(10)
fig = plt.figure(dpi=100)
plt.plot(K, variances)
plt.ylabel("Inertia ( SSE )")
plt.xlabel("K Value")
plt.xticks(list(range(1, n + 1)))
return fig
def cluster_variance_sil(data):
n = 15
variances = []
kmeans = []
K = list(range(2, n + 1))
for i in range(2, n + 1):
model = KMeans(n_clusters=i, random_state=82, verbose=0).fit(data)
label = model.labels_
kmeans.append(model)
variances.append(model.inertia_)
sil_coeff = silhouette_score(data, label, metric='euclidean')
print("For n_clusters={}, The Silhouette Coefficient is {}".format(i, sil_coeff))
# variances,K,n=cluster_variance(10)
fig = plt.figure(dpi=100)
plt.plot(K, variances)
plt.ylabel("Inertia ( SSE )")
plt.xlabel("K Value")
plt.xticks(list(range(2, n + 1)))
return fig
def kmeans_(k, data):
km_res = KMeans(n_clusters=k).fit(data)
y_km = km_res.labels_
clusters = km_res.cluster_centers_
dist = km_res.transform(data)
distance = pd.DataFrame(dist)
result = pd.DataFrame(y_km)
result.columns = ['cluster']
return result, clusters, distance
def gen_map(data, result, cmap, dpi):
coord = pd.DataFrame(data[data.columns[0:2]])
coord_cluster = coord.join(result)
coord_cluster.columns = ['x', 'y', 'c']
grid_base = coord_cluster.pivot('y', 'x').values
X = coord_cluster.x.unique()
X.sort()
Y = coord_cluster.y.unique()
Y.sort()
cMap = c.ListedColormap(cmap)
fig = plt.figure(dpi=dpi)
plt.pcolormesh(X, Y, grid_base, shading='auto', cmap=cMap, alpha=0.7, figure=fig)
plt.clim(0, np.max(grid_base))
plt.gca().set_aspect('equal')
plt.gca().invert_yaxis()
# plt.savefig('test.png', transparent=True)
return fig
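# Editor's note: a minimal end-to-end sketch of how the helpers above fit together.
# It assumes a DataFrame `df` with x, y coordinates in the first two columns and
# spectra from column 3 onwards (the layout these functions expect); the concrete
# component count, k and colours are assumptions:
#   pca_initial_gui(df)                            # scree plot, choose n components
#   scores = pca_final_gui(df, 5)
#   cluster_variance(scores)                       # elbow plot, choose k
#   labels, centres, dists = kmeans_(4, scores)
#   fig = gen_map(df, labels, cmap=['r', 'g', 'b', 'y'], dpi=100)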
def res_vbose(data, result):
coord = | pd.DataFrame(data) | pandas.DataFrame |
import os
import shutil
import logging
import pandas as pd
import matplotlib
matplotlib.use("agg") # no need for tk
from supervised.automl import AutoML
from frameworks.shared.callee import call_run, result, output_subdir, utils
log = logging.getLogger(os.path.basename(__file__))
def run(dataset, config):
log.info("\n**** mljar-supervised ****\n")
column_names, _ = zip(*dataset.columns)
column_types = dict(dataset.columns)
X_train = pd.DataFrame(dataset.train.X, columns=column_names).astype(
column_types, copy=False
)
X_test = | pd.DataFrame(dataset.test.X, columns=column_names) | pandas.DataFrame |
"""
@author: <NAME>
"""
import os
import re
import numpy as np
import pandas as pd
import json
import pickle
from argparse import ArgumentParser
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report
import sys
import warnings
parser = ArgumentParser()
# For training & validation
parser.add_argument("--trainDir", type=str, default="./datasets/train", help="path to training data")
parser.add_argument("--validDir", type=str, default="./datasets/valid",help="path to validation data")
parser.add_argument("--outDir", type=str, default="experiments", help="directory to store trained model & classification report")
parser.add_argument("--features", type=str, default="lri", choices=["lri", "lr", "li", "ri"], help="combination of context feature vectors for the experiment")
parser.add_argument("--modelType", type=str, default="svm", choices=["knn", "svm", "mlp"], help="type of model to experiment")
# For testing
parser.add_argument('--test', action='store_true', help="indicator for test data")
parser.add_argument("--testDir", type=str, default="./datasets/test", help="path to test data")
parser.add_argument("--testOutDir", type=str, default="./test_out", help="path to output files for storing test predictions")
parser.add_argument("--modelsDir", type=str, help="path to trained models")
# Tunable params for weighted k-NN
parser.add_argument("--k", type=int, default=5, help="[k-NN] no. of neighbors")
parser.add_argument("--beta", type=float, default=1, help="[k-NN] relative weight of right context vector")
parser.add_argument("--gamma", type=float, default=1, help="[k-NN] relative weight of interplay context vector")
# Tunable params for MLP
parser.add_argument("--numNeurons", type=int, default=100, help="[MLP] no. of neurons in hidden layer")
# Tunable params for SVM
parser.add_argument("--C", type=float, default=1, help="[SVM] regularization parameter")
class PSDExperiment:
def __init__(self, model_type, params, features, train_path, valid_path, output_dir, test=False):
self.features = features
self.model_type = model_type
self.params = params
if not test:
print("Initializing experiment...")
self.train_dir = train_path
self.valid_dir = valid_path
self.model_identifier = f"{features}_{model_type}"
for key, value in params.items():
self.model_identifier += f"_{key}={value}"
self.output_dir = output_dir
self.models_dir = os.path.join(self.output_dir, "models", self.model_identifier)
self.reports_dir = os.path.join(self.output_dir, "reports")
for folder in [self.models_dir, self.reports_dir ]:
if not os.path.exists(folder):
os.makedirs(folder)
self.prepositions_with_one_sense = {}
else:
with open("sense_mapping.json", "r") as f:
self.sense_mappings = json.load(f)
def __initializeModel(self):
if self.model_type == "knn":
return KNeighborsClassifier(n_neighbors=self.params["k"])
elif self.model_type == "mlp":
return MLPClassifier(hidden_layer_sizes=(self.params["num_neurons"],), random_state=1, max_iter=200)
elif self.model_type == "svm":
return SVC(kernel="linear", C=self.params["C"])
else:
print("Invalid model! Exitting...")
exit(1)
def __getFeatures(self, df):
X_vl = np.stack(df["vl"].values, axis=0)
X_vr = np.stack(df["vr"].values, axis=0)
X_vi = np.stack(df["vi"].values, axis=0)
if self.features == "lri":
if self.model_type == "knn":
return X_vl + self.params["beta"]*X_vr + self.params["gamma"]*X_vi
else:
return np.concatenate((X_vl, X_vr, X_vi), axis=1)
elif self.features == "lr":
if self.model_type == "knn":
return X_vl + self.params["beta"]*X_vr
else:
return np.concatenate((X_vl, X_vr), axis=1)
elif self.features == "li":
if self.model_type == "knn":
return X_vl + self.params["gamma"]*X_vi
else:
return np.concatenate((X_vl, X_vi), axis=1)
elif self.features == "ri":
if self.model_type == "knn":
return self.params["beta"]*X_vr + self.params["gamma"]*X_vi
else:
return np.concatenate((X_vr, X_vi), axis=1)
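    # Editor's note: for k-NN the left/right/interplay context vectors are folded into
    # a single weighted sum, so that Euclidean distance reflects the beta and gamma
    # weights, whereas for SVM and MLP the selected vectors are concatenated into one
    # longer feature vector.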
def trainModels(self):
print(f"Training {self.model_type} models...")
for prep_train_data in os.listdir(self.train_dir):
preposition = re.findall(r"([a-z]*)\.pkl", prep_train_data)[0]
train_df = pd.read_pickle(os.path.join(self.train_dir, prep_train_data))
X = self.__getFeatures(train_df)
y = train_df["preposition_sense"]
num_senses = len(y.unique())
print("Preposition: %s \tNumber of senses: %d" % (preposition, num_senses))
if num_senses > 1:
# Train a model to disambiguate each preposition
model = self.__initializeModel()
model.fit(X, y)
pickle.dump(model, open(os.path.join(self.models_dir, preposition + ".sav"), 'wb'))
else:
self.prepositions_with_one_sense[preposition] = y[0]
print("Training completed!")
print("==================================================================")
def validateModels(self):
print("Validating models...")
y_actual_all = | pd.Series([], dtype=str) | pandas.Series |
import pandas as pd
def break_even_moneyline(probability):
if probability > 0.5:
x = -(100 / (1-probability))+100
else:
x = 100/probability - 100
return x
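# Quick check (editor's note): a 60% favourite breaks even at -150 and a 40%
# underdog at +150, i.e.
#   break_even_moneyline(0.6)  # -(100 / 0.4) + 100 = -150.0
#   break_even_moneyline(0.4)  #   100 / 0.4  - 100 =  150.0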
def predict_probs_and_moneylines(data):
predictions = | pd.DataFrame(columns=["Team1", "Seed1", "Team2", "Seed2", "Win%1", "Win%2", "ML1", "ML2"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 10:15:43 2018
Build a neural network from perceptrons, treating biases as part of the weights, and use
matrix computation to optimize the stochastic gradient descent method.
@author: Feng
"""
#### Libraries
import random
import numpy as np
class NeuralNetwork(object):
def __init__(self, sizes):
self.num_layers = len(sizes)
self.sizes = sizes
self.weights = [np.random.randn(y, x+1) \
for x, y in zip((sizes[:-1]), sizes[1:])] # biases are included in weights
def feedforward(self, a):
for w in self.weights:
a = np.concatenate((a,np.array([1]).reshape(1,1))) # add bias neuron
a = sigmoid(np.dot(w, a))
return a
def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
if test_data: n_test = len(test_data)
n = len(training_data)
for j in range(epochs):
random.shuffle(training_data)
mini_batches = [training_data[k:k+mini_batch_size] \
for k in range(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
print ("Epoch {0}: {1} / {2}".format( \
j, self.evaluate_0(test_data), n_test))
else:
print ("Epoch {0} complete".format(j))
def update_mini_batch(self, mini_batch, eta):
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_w = self.backprop(x, y)
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
def backprop(self, x, y):
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [activation] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for w in self.weights:
activation = np.concatenate((activation,np.array([1]).reshape(1,1)))
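            # overwrite the stored activation with its bias-augmented copy so
            # that the weight gradients computed below include the bias column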
activations[-1]=activation
z = np.dot(w, activation)
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
for l in range(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
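            # drop the last entry of delta: it belongs to the constant bias unit,
            # which has no incoming weights and therefore receives no error signal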
delta = np.dot(self.weights[-l+1].transpose(), delta)[:-1]
delta = delta * sp
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return nabla_w
def evaluate_0(self, test_data):
test_results = [(int(self.feedforward(x)[1][0]>0.1), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def evaluate(self, test_data):
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
return (output_activations-y)
#### Miscellaneous functions
def sigmoid(z):
return 1.0/(1.0+np.exp(-z))
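# Note: np.exp(-z) can overflow for large negative z; scipy.special.expit is a
# numerically stable drop-in replacement if that ever becomes a problem.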
def sigmoid_prime(z):
return sigmoid(z)*(1-sigmoid(z))
#### Prepare training data, validation data, and test data
import pandas as pd
car_data=pd.read_csv('car.csv')
car_data = car_data.reindex(columns=['IsBadBuy','Size','Make','VNST','IsOnlineSale','VehicleAge','Transmission',
'WheelType','Auction'])
shuffler= np.random.permutation(len(car_data))
car_shuffle = car_data.take(shuffler) # shuffle rows with pandas' take, as an alternative to random.shuffle
# X preparation
Size = pd.get_dummies(car_data['Size'],prefix='Size') # generate dummy variables from the categorical column
Make = pd.get_dummies(car_data['Make'],prefix='Make')
VNST = pd.get_dummies(car_data['VNST'],prefix='VNST')
VehicleAge = pd.get_dummies(car_data['VehicleAge'],prefix='VehicleAge')
WheelType = pd.get_dummies(car_data['WheelType'],prefix='WheelType')
Auction = pd.get_dummies(car_data['Auction'],prefix='Auction')
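# --- Hedged sketch (not part of the original script): one way the dummy frames
# could be assembled and fed to NeuralNetwork; the column choices and
# hyperparameters below are assumptions for illustration only.
# X = pd.concat([Size, Make, VNST, VehicleAge, WheelType, Auction], axis=1).values
# y = car_data['IsBadBuy'].values
# training_data = [(x.reshape(-1, 1), np.eye(2)[int(t)].reshape(-1, 1))
#                  for x, t in zip(X, y)]
# net = NeuralNetwork([X.shape[1], 30, 2])
# net.SGD(training_data, epochs=30, mini_batch_size=10, eta=3.0)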
import numpy as np
import pandas as pd
from sklearn import preprocessing, linear_model, metrics
import gc; gc.enable()
import random
import time, datetime
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import TheilSenRegressor, BayesianRidge
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
np.random.seed(1122)
# Util functions
def print_runtime(start, end):
print("runtime: {}".format( datetime.timedelta(seconds=(end-start)/60)))
def print_dataframe_size(name, df):
print("size of {}: {:.3f} MB".format(name, df.memory_usage(index=True).sum()/1E6))
# Read datasets
print('Reading datasets...')
start = time.time()
dtypes = {'id':'uint32', 'item_nbr':'int32', 'store_nbr':'int8', 'onpromotion':str}
print('Reading train and test...')
train = pd.read_csv('../input/train.csv', dtype=dtypes, parse_dates=['date'])
test = pd.read_csv('../input/test.csv', dtype=dtypes, parse_dates=['date'])
print('Reading others...')
items = pd.read_csv('../input/items.csv', dtype={'item_nbr':'int32', 'perishable':bool})
stores = pd.read_csv('../input/stores.csv', dtype={'store_nbr':'uint8', 'cluster':'uint8' })
transactions = pd.read_csv('../input/transactions.csv', dtype={'store_nbr':'uint8'}, parse_dates=['date'])
holidays = pd.read_csv('../input/holidays_events.csv', dtype={'transferred':str}, parse_dates=['date'])
holidays['transferred'] = holidays['transferred'].map({'False': 0, 'True': 1})
oil = pd.read_csv('../input/oil.csv', parse_dates=['date'])
print_runtime(start, time.time())
# Dataset processing
print('\nProcessing datasets...')
start_dp = time.time()
## Reduce training dataset
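# Keep only the second half of August from each year: the competition's test
# period falls in mid-to-late August, so these rows are the closest seasonal
# match (the commented-out lines below are alternative slices that were tried).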
train = train[(train['date'].dt.month == 8) & (train['date'].dt.day > 15)]
#train = train[(train['date'].dt.month == 8)]
train['onpromotion'] = train['onpromotion'].map({'False': 0, 'True': 1})
test['onpromotion'] = test['onpromotion'].map({'False': 0, 'True': 1})
#train2017 = train[(train['date'].dt.year == 2017)]
#train201608 = train[(train['date'].dt.year == 2016) & (train['date'].dt.day > 15)]
#train2016 = train[(train['date'].dt.year == 2016))]
#train = pd.concat([train2017,train2016])
#del train2017, train2016; gc.collect();
#train[(train['date'].dt.year == 2016) | (train['date'].dt.year == 2017)]
#train = train[(train['date'].dt.year == 2017)]
#train = train[(train['date'].dt.month >= 5)]
# Transform target
target = train['unit_sales'].values
target[target < 0.] = 0.
train['unit_sales'] = np.log1p(target)
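# log1p puts unit_sales on the logarithmic scale used by the NWRMSLE-style
# evaluation metric and tames the heavy right tail of the sales counts.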
# Transform transactions
#tx = transactions['transactions'].values
#transactions[tx < 0.] = 0.
transactions['transactions'] = np.log1p(transactions['transactions'])
def df_lbl_enc(df):
for c in df.columns:
if df[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
df[c] = lbl.fit_transform(df[c])
print(c)
return df
def df_lbl_enc_2(df, cols):
for c in cols:
lbl = preprocessing.LabelEncoder()
df[c] = lbl.fit_transform(df[c])
return df
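# Typical use (an assumption, not shown in this excerpt): label-encode the
# object columns of the lookup tables before merging, e.g.
#   items = df_lbl_enc(items)
#   stores = df_lbl_enc_2(stores, ['city', 'state', 'type'])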
def df_transform_date(df):
    df['date'] = pd.to_datetime(df['date'])
import pysubgroup as ps
import pandas as pd
from subgroup_sem import SEMTarget, TestQF
############################################################################################
# import and preprocess data
############################################################################################
data = pd.read_csv('artificial_data.csv')