# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import quantipy as qp
# from matplotlib import pyplot as plt
# import matplotlib.image as mpimg
import string
import pickle
import warnings
try:
import seaborn as sns
from PIL import Image
except ImportError:
pass
from quantipy.core.cache import Cache
from quantipy.core.view import View
from quantipy.core.view_generators.view_mapper import ViewMapper
from quantipy.core.view_generators.view_maps import QuantipyViews
from quantipy.core.helpers.functions import emulate_meta
from quantipy.core.tools.view.logic import (has_any, has_all, has_count,
not_any, not_all, not_count,
is_lt, is_ne, is_gt,
is_le, is_eq, is_ge,
union, intersection, get_logic_index)
from quantipy.core.helpers.functions import (paint_dataframe,
emulate_meta,
get_text,
finish_text_key)
from quantipy.core.tools.dp.prep import recode
from quantipy.core.tools.qp_decorators import lazy_property
from operator import add, sub, mul
from operator import truediv as div
#from scipy.stats.stats import _ttest_finish as get_pval
from scipy.stats._stats_py import _ttest_finish as get_pval
from scipy.stats import chi2 as chi2dist
from scipy.stats import f as fdist
from itertools import combinations, chain, product
from collections import defaultdict, OrderedDict, Counter
import gzip
try:
import dill
except ImportError:
pass
import json
import copy
import time
import sys
import re
from quantipy.core.rules import Rules
_TOTAL = '@'
_AXES = ['x', 'y']
class ChainManager(object):
def __init__(self, stack):
self.stack = stack
self.__chains = []
self.source = 'native'
self.build_info = {}
self._hidden = []
def __str__(self):
return '\n'.join([chain.__str__() for chain in self])
def __repr__(self):
return self.__str__()
def __getitem__(self, value):
if isinstance(value, str):
element = self.__chains[self._idx_from_name(value)]
is_folder = isinstance(element, dict)
if is_folder:
return list(element.values())[0]
else:
return element
else:
return self.__chains[value]
def __len__(self):
"""returns the number of cached Chains"""
return len(self.__chains)
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < self.__len__():
obj = self[self.n]
self.n += 1
return obj
else:
raise StopIteration
next = __next__
def add_chain(self, chain):
self.__chains.append(chain)
@property
def folders(self):
"""
Folder indices, names and number of stored ``qp.Chain`` items (as tuples).
"""
return [(self.__chains.index(f), list(f.keys())[0], len(list(f.values())[0]))
for f in self if isinstance(f, dict)]
@property
def singles(self):
"""
The list of all non-folder ``qp.Chain`` indices and names (as tuples).
"""
return list(zip(self.single_idxs, self.single_names))
@property
def chains(self):
"""
The flattened list of all ``qp.Chain`` items of self.
"""
all_chains = []
for c in self:
if isinstance(c, dict):
all_chains.extend(list(c.values())[0])
else:
all_chains.append(c)
return all_chains
@property
def folder_idxs(self):
"""
The folders' index positions in self.
"""
return [f[0] for f in self.folders]
@property
def folder_names(self):
"""
The folders' names from self.
"""
return [f[1] for f in self.folders]
@property
def single_idxs(self):
"""
The ``qp.Chain`` instances' index positions in self.
"""
return [self.__chains.index(c) for c in self if isinstance(c, Chain)]
@property
def single_names(self):
"""
The ``qp.Chain`` instances' names.
"""
return [s.name for s in self if isinstance(s, Chain)]
@property
def hidden(self):
"""
All ``qp.Chain`` elements that are hidden.
"""
return [c.name for c in self.chains if c.hidden]
@property
def hidden_folders(self):
"""
All hidden folders.
"""
return [n for n in self._hidden if n in self.folder_names]
def _content_structure(self):
return ['folder' if isinstance(k, dict) else 'single' for k in self]
def _singles_to_idx(self):
return {name: i for i, name in list(self._idx_to_singles().items())}
def _idx_to_singles(self):
return dict(self.singles)
def _idx_fold(self):
return dict([(f[0], f[1]) for f in self.folders])
def _folders_to_idx(self):
return {name: i for i, name in list(self._idx_fold().items())}
def _names(self, unroll=False):
if not unroll:
return self.folder_names + self.single_names
else:
return [c.name for c in self.chains]
def _idxs_to_names(self):
singles = self.singles
folders = [(f[0], f[1]) for f in self.folders]
return dict(singles + folders)
def _names_to_idxs(self):
return {n: i for i, n in list(self._idxs_to_names().items())}
def _name_from_idx(self, idx):
return self._idxs_to_names()[idx]
def _idx_from_name(self, name):
return self._names_to_idxs()[name]
def _is_folder_ref(self, ref):
return ref in self._folders_to_idx() or ref in self._idx_fold()
def _is_single_ref(self, ref):
return ref in self._singles_to_idx() or ref in self._idx_to_singles()
def _uniquify_names(self):
all_names = Counter(self.single_names + self.folder_names)
single_name_occ = Counter(self.single_names)
folder_name_occ = {folder: Counter([c.name for c in self[folder]])
for folder in self.folder_names}
for struct_name in all_names:
if struct_name in folder_name_occ:
iter_over = folder_name_occ[struct_name]
is_folder = struct_name
else:
iter_over = single_name_occ
is_folder = False
for name, occ in list(iter_over.items()):
if occ > 1:
new_names = ['{}_{}'.format(name, i) for i in range(1, occ + 1)]
idx = [s[0] for s in self.singles if s[1] == name]
pairs = list(zip(idx, new_names))
if is_folder:
for idx, c in enumerate(self[is_folder]):
c.name = pairs[idx][1]
else:
for p in pairs:
self.__chains[p[0]].name = p[1]
return None
def _set_to_folderitems(self, folder):
"""
Will keep only the ``values()`` ``qp.Chain`` item list from the named
folder. Use this for within-folder-operations...
"""
if not folder in self.folder_names:
err = "A folder named '{}' does not exist!".format(folder)
raise KeyError(err)
else:
org_chains = self.__chains[:]
org_index = self._idx_from_name(folder)
self.__chains = self[folder]
return org_chains, org_index
def _rebuild_org_folder(self, folder, items, index):
"""
After a within-folder operation, this method uses the returns
of ``_set_to_folderitems`` to rebuild the originating folder.
"""
self.fold(folder)
new_folder = self.__chains[:]
self.__chains = items
self.__chains[index] = new_folder[0]
return None
@staticmethod
def _dupes_in_chainref(chain_refs):
return len(set(chain_refs)) != len(chain_refs)
def _check_equality(self, other, return_diffs=True):
"""
"""
chains1 = self.chains
chains2 = other.chains
diffs = {}
if not len(chains1) == len(chains2):
return False
else:
paired = list(zip(chains1, chains2))
for c1, c2 in paired:
atts1 = c1.__dict__
atts2 = c2.__dict__
for att in list(atts1.keys()):
if isinstance(atts1[att], (pd.DataFrame, pd.Index)):
if not atts1[att].equals(atts2[att]):
diffs[att] = [atts1[att], atts2[att]]
else:
if atts1[att] != atts2[att]:
diffs[att] = [atts1[att], atts2[att]]
return diffs if return_diffs else not diffs
def _test_same_structure(self, other):
"""
"""
folders1 = self.folders
singles1 = self.singles
folders2 = other.folders
singles2 = other.singles
if (folders1 != folders2 or singles1 != singles2):
return False
else:
return True
def equals(self, other):
"""
Test equality of self to another ``ChainManager`` object instance.
.. note::
Only the flattened list of ``Chain`` objects stored are tested, i.e.
any folder structure differences are ignored. Use ``compare()`` for
a more detailed comparison.
Parameters
----------
other : ``qp.ChainManager``
Another ``ChainManager`` object to compare.
Returns
-------
equality : bool
"""
return self._check_equality(other, False)
def compare(self, other, strict=True, full_summary=True):
"""
Compare structure and content of self to another ``ChainManager`` instance.
Parameters
----------
other : ``qp.ChainManager``
Another ``ChainManager`` object to compare.
strict : bool, default True
Test if the structure of folders vs. single Chain objects is the
same in both ChainManager instances.
full_summary : bool, default True
``False`` will disable the detailed comparison ``pd.DataFrame``
that informs about differences between the objects.
Returns
-------
result : str
A brief feedback message about the comparison results.
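Examples
--------
A minimal, hypothetical sketch (assumes two populated ``ChainManager``
instances ``cm_a`` and ``cm_b``; the names are made up):
>>> cm_a.compare(cm_b, strict=True, full_summary=False)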
"""
diffs = []
if strict:
same_structure = self._test_same_structure(other)
if not same_structure:
diffs.append('s')
check = self._check_equality(other)
if isinstance(check, bool):
diffs.append('l')
else:
if check: diffs.append('c')
report_full = ['_frame', '_x_keys', '_y_keys', 'index', '_columns',
'base_descriptions', 'annotations']
diffs_in = ''
if diffs:
if 'l' in diffs:
diffs_in += '\n -Length (number of stored Chain objects)'
if 's' in diffs:
diffs_in += '\n -Structure (folders and/or single Chain order)'
if 'c' in diffs:
diffs_in += '\n -Chain elements (properties and content of Chain objects)'
if diffs_in:
result = 'ChainManagers are not identical:\n'
result += '--------------------------------' + diffs_in
else:
result = 'ChainManagers are identical.'
print(result)
return None
def save(self, path, keep_stack=False):
"""
"""
if not keep_stack:
del self.stack
self.stack = None
f = open(path, 'wb')
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
f.close()
return None
@staticmethod
def load(path):
"""
"""
f = open(path, 'rb')
obj = pickle.load(f)
f.close()
return obj
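# Illustrative save/load round trip (a sketch only, assuming ``cm`` is a
# populated ChainManager; the file path is hypothetical):
#
#     cm.save('./chains.qpc', keep_stack=False)
#     cm_restored = ChainManager.load('./chains.qpc')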
def _toggle_vis(self, chains, mode='hide'):
if not isinstance(chains, list): chains = [chains]
for chain in chains:
if isinstance(chain, dict):
fname = list(chain.keys())[0]
elements = list(chain.values())[0]
fidx = self._idx_from_name(fname)
folder = self[fidx][fname]
for c in folder:
if c.name in elements:
c.hidden = True if mode == 'hide' else False
if mode == 'hide' and not c.name in self._hidden:
self._hidden.append(c.name)
if mode == 'unhide' and c.name in self._hidden:
self._hidden.remove(c.name)
else:
if chain in self.folder_names:
for c in self[chain]:
c.hidden = True if mode == 'hide' else False
else:
self[chain].hidden = True if mode == 'hide' else False
if mode == 'hide':
if not chain in self._hidden:
self._hidden.append(chain)
else:
if chain in self._hidden:
self._hidden.remove(chain)
return None
def hide(self, chains):
"""
Flag elements as being hidden.
Parameters
----------
chains : (list) of int and/or str or dict
The ``qp.Chain`` item and/or folder names to hide. To hide *within*
a folder use a dict to map the desired Chain names to the belonging
folder name.
Returns
-------
None
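Examples
--------
A hypothetical sketch (assumes a populated ``ChainManager`` ``cm``; item
and folder names are made up):
>>> cm.hide(['Q1', 'Q2'])
>>> cm.hide([{'demographics': ['gender', 'age']}])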
"""
self._toggle_vis(chains, 'hide')
return None
def unhide(self, chains=None):
"""
Unhide elements that have been set as ``hidden``.
Parameters
----------
chains : (list) of int and/or str or dict, default None
The ``qp.Chain`` item and/or folder names to unhide. To unhide *within*
a folder use a dict to map the desired Chain names to the belonging
folder name. If not provided, all hidden elements will be unhidden.
Returns
-------
None
"""
if not chains: chains = self.folder_names + self.single_names
self._toggle_vis(chains, 'unhide')
return None
def clone(self):
"""
Return a full (deep) copy of self.
"""
return copy.deepcopy(self)
def insert(self, other_cm, index=-1, safe_names=False):
"""
Add elements from another ``ChainManager`` instance to self.
Parameters
----------
other_cm : ``quantipy.ChainManager``
A ChainManager instance to draw the elements from.
index : int, default -1
The positional index after which new elements will be added.
Defaults to -1, i.e. elements are appended at the end.
safe_names : bool, default False
If True and any duplicated element names are found after the
operation, names will be made unique (by appending '_1', '_2', '_3',
etc.).
Returns
-------
None
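Examples
--------
A hypothetical sketch (assumes a second instance ``other_cm``):
>>> cm.insert(other_cm, index=2, safe_names=True)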
"""
if not isinstance(other_cm, ChainManager):
raise ValueError("other_cm must be a quantipy.ChainManager instance.")
if not index == -1:
before_c = self.__chains[:index+1]
after_c = self.__chains[index+1:]
new_chains = before_c + other_cm.__chains + after_c
self.__chains = new_chains
else:
self.__chains.extend(other_cm.__chains)
if safe_names: self._uniquify_names()
return None
def merge(self, folders, new_name=None, drop=True):
"""
Unite the items of two or more folders, optionally providing a new name.
If duplicated ``qp.Chain`` items are found, the first instance will be
kept. The merged folder will take the place of the first folder named
in ``folders``.
Parameters
----------
folders : list of int and/or str
The folders to merge referenced by their positional index or by name.
new_name : str, default None
Use this as the merged folder's name. If not provided, the name
of the first folder in ``folders`` will be used instead.
drop : bool, default True
If ``False``, the original folders will be kept alongside the
new merged one.
Returns
-------
None
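Examples
--------
A hypothetical sketch (folder names are made up):
>>> cm.merge(['Wave 1', 'Wave 2'], new_name='Waves 1-2', drop=True)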
"""
if not isinstance(folders, list):
err = "'folders' must be a list of folder references!"
raise TypeError(err)
if len(folders) == 1:
err = "'folders' must contain at least two folder names!"
raise ValueError(err)
if not all(self._is_folder_ref(f) for f in folders):
err = "One or more folder names from 'folders' do not exist!"
raise ValueError(err)
folders = [f if isinstance(f, str) else self._name_from_idx(f)
for f in folders]
folder_idx = self._idx_from_name(folders[0])
if not new_name: new_name = folders[0]
merged_items = []
seen_names = []
for folder in folders:
for chain in self[folder]:
if not chain.name in seen_names:
merged_items.append(chain)
seen_names.append(chain.name)
if drop:
self.__chains[folder_idx] = {new_name: merged_items}
remove_folders = folders[1:] if new_name != folders[0] else folders
for r in remove_folders:
self.remove(r)
else:
start = self.__chains[:folder_idx]
end = self.__chains[folder_idx:]
self.__chains = start + [{new_name: merged_items}] + end
return None
def fold(self, folder_name=None, chains=None):
"""
Arrange non-``dict`` structured ``qp.Chain`` items in folders.
All separate ``qp.Chain`` items will be mapped to their ``name``
property being the ``key`` in the transformed ``dict`` structure.
Parameters
----------
folder_name : str, default None
Collect all items in a folder keyed by the provided name. If the
key already exists, the items will be appended to the ``dict``
values.
chains : (list) of int and/or str, default None
Select specific ``qp.Chain`` items by providing their positional
indices or ``name`` property value for moving only a subset to the
folder.
Returns
-------
None
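Examples
--------
A hypothetical sketch (item and folder names are made up):
>>> cm.fold('demographics', chains=['gender', 'age'])
>>> cm.fold()  # wrap every remaining single Chain into its own folder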
"""
if chains:
if not isinstance(chains, list): chains = [chains]
if any(self._is_folder_ref(c) for c in chains):
err = 'Cannot build folder from other folders!'
raise ValueError(err)
all_chain_names = []
singles = []
for c in chains:
if isinstance(c, str):
all_chain_names.append(c)
elif isinstance(c, int) and c in self._idx_to_singles():
all_chain_names.append(self._idx_to_singles()[c])
for c in all_chain_names:
singles.append(self[self._singles_to_idx()[c]])
else:
singles = [s for s in self if isinstance(s, Chain)]
if self._dupes_in_chainref(singles):
err = "Cannot build folder from duplicate qp.Chain references: {}"
raise ValueError(err.format(singles))
for s in singles:
if folder_name:
if folder_name in self.folder_names:
self[folder_name].append(s)
else:
self.__chains.append({folder_name: [s]})
del self.__chains[self._singles_to_idx()[s.name]]
else:
self.__chains[self._singles_to_idx()[s.name]] = {s.name: [s]}
return None
def unfold(self, folder=None):
"""
Remove folder but keep the collected items.
The items will be added starting at the old index position of the
original folder.
Parameters
----------
folder : (list of) str, default None
The name of the folder to drop and extract items from. If not
provided all folders will be unfolded.
Returns
-------
None
"""
if not folder:
folder = self.folder_names
else:
if not isinstance(folder, list): folder = [folder]
invalid = [f for f in folder if f not in self.folder_names]
if invalid:
err = "Folder(s) named '{}' not found.".format(invalid)
raise KeyError(err)
for f in folder:
old_pos = self._idx_from_name(f)
items = self[f]
start = self.__chains[: old_pos]
end = self.__chains[old_pos + 1: ]
self.__chains = start + items + end
return None
def remove(self, chains, folder=None, inplace=True):
"""
Remove (folders of) ``qp.Chain`` items by providing a list of indices
or names.
Parameters
----------
chains : (list) of int and/or str
``qp.Chain`` items or folders provided by their positional
indices or ``name`` property.
folder : str, default None
If a folder name is provided, items will be dropped within that
folder only instead of removing all found instances.
inplace : bool, default True
By default the new order is applied inplace, set to ``False`` to
return a new object instead.
Returns
-------
None
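Examples
--------
A hypothetical sketch (item and folder names are made up):
>>> cm.remove(['Q5', 0])
>>> cm.remove('age', folder='demographics')
>>> trimmed = cm.remove('Q5', inplace=False)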
"""
if inplace:
cm = self
else:
cm = self.clone()
if folder:
org_chains, org_index = cm._set_to_folderitems(folder)
if not isinstance(chains, list): chains = [chains]
remove_idxs= [c if isinstance(c, int) else cm._idx_from_name(c)
for c in chains]
if cm._dupes_in_chainref(remove_idxs):
err = "Cannot remove with duplicate chain references: {}"
raise ValueError(err.format(remove_idxs))
new_items = []
for pos, c in enumerate(cm):
if not pos in remove_idxs: new_items.append(c)
cm.__chains = new_items
if folder: cm._rebuild_org_folder(folder, org_chains, org_index)
if inplace:
return None
else:
return cm
def cut(self, values, ci=None, base=False, tests=False):
"""
Isolate selected axis values in the ``Chain.dataframe``.
Parameters
----------
values : (list of) str
The string must indicate the raw (i.e. the unpainted) second level
axis value, e.g. ``'mean'``, ``'net_1'``, etc.
ci : {'counts', 'c%', None}, default None
The cell item version to target if multiple frequency representations
are present.
base : bool, default False
Controls keeping any existing base view aggregations.
tests : bool, default False
Controls keeping any existing significance test view aggregations.
Returns
-------
None
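Examples
--------
A hypothetical sketch (the targeted axis values are made up):
>>> cm.cut(['net_1', 'mean'], ci='c%', base=True, tests=False)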
"""
if not isinstance(values, list): values = [values]
if 'cbase' in values:
values[values.index('cbase')] = 'All'
if base and not 'All' in values:
values = ['All'] + values
for c in self.chains:
# force ci parameter for proper targeting on array summaries...
if c.array_style == 0 and ci is None:
_ci = c.cell_items.split('_')[0]
if not _ci.startswith('counts'):
ci = '%'
else:
ci = 'counts'
if c.sig_test_letters: c._remove_letter_header()
idxs, names, order = c._view_idxs(
values, keep_tests=tests, keep_bases=base, names=True, ci=ci)
idxs = [i for _, i in sorted(zip(order, idxs))]
names = [n for _, n in sorted(zip(order, names))]
if c.ci_count > 1: c._non_grouped_axis()
if c.array_style == 0:
c._fill_cells()
start, repeat = c._row_pattern(ci)
c._frame = c._frame.iloc[start::repeat, idxs]
else:
c._frame = c._frame.iloc[idxs, :]
c.index = c._slice_edited_index(c.index, idxs)
new_views = OrderedDict()
for v in c.views.copy():
if not v in names:
del c._views[v]
else:
c._views[v] = names.count(v)
if not c._array_style == 0:
if not tests:
c.sig_test_letters = None
else:
c._frame = c._apply_letter_header(c._frame)
c.edited = True
return None
def join(self, title='Summary'):
"""
Join **all** ``qp.Chain`` elements, concatenating along the matching axis.
Parameters
----------
title : {str, 'auto'}, default 'Summary'
The new title for the joined axis' index representation.
Returns
-------
None
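Examples
--------
A hypothetical sketch:
>>> cm.join(title='Overview')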
"""
custom_views = []
self.unfold()
chains = self.chains
totalmul = len(chains[0]._frame.columns.get_level_values(0).tolist())
concat_dfs = []
new_labels = []
for c in chains:
new_label = []
if c.sig_test_letters:
c._remove_letter_header()
c._frame = c._apply_letter_header(c._frame)
df = c.dataframe
if not c.array_style == 0:
new_label.append(df.index.get_level_values(0).values.tolist()[0])
new_label.extend((len(c.describe()) - 1) * [''])
else:
new_label.extend(df.index.get_level_values(1).values.tolist())
names = ['Question', 'Values']
join_idx = pd.MultiIndex.from_product([[title], new_label], names=names)
df.index = join_idx
df.rename(columns={c._x_keys[0]: 'Total'}, inplace=True)
if not c.array_style == 0:
custom_views.extend(c._views_per_rows())
else:
df.columns.set_levels(levels=[title]*totalmul, level=0, inplace=True)
concat_dfs.append(df)
new_df = pd.concat(concat_dfs, axis=0, join='inner')
self.chains[0]._frame = new_df
self.reorder([0])
self.rename({self.single_names[0]: title})
self.fold()
self.chains[0]._custom_views = custom_views
return None
def reorder(self, order, folder=None, inplace=True):
"""
Reorder (folders of) ``qp.Chain`` items by providing a list of new
indices or names.
Parameters
----------
order : list of int and/or str
The folder or ``qp.Chain`` references to determine the new order
of items. Any items not referenced will be removed from the new
order.
folder : str, default None
If a folder name is provided, items will be sorted within that
folder instead of applying the sorting to the general items
collection.
inplace : bool, default True
By default the new order is applied inplace, set to ``False`` to
return a new object instead.
Returns
-------
None
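Examples
--------
A hypothetical sketch (mixing positional indices and names, which are
made up):
>>> cm.reorder(['Q3', 0, 'Q1'])
>>> cm.reorder([1, 0], folder='demographics')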
"""
if inplace:
cm = self
else:
cm = self.clone()
if folder:
org_chains, org_index = self._set_to_folderitems(folder)
if not isinstance(order, list):
err = "'order' must be a list!"
raise ValueError(err)
new_idx_order = []
for o in order:
if isinstance(o, int):
new_idx_order.append(o)
else:
new_idx_order.append(self._idx_from_name(o))
if cm._dupes_in_chainref(new_idx_order):
err = "Cannot reorder from duplicate qp.Chain references: {}"
raise ValueError(err.format(new_idx_order))
items = [self.__chains[idx] for idx in new_idx_order]
cm.__chains = items
if folder: cm._rebuild_org_folder(folder, org_chains, org_index)
if inplace:
return None
else:
return cm
def rename(self, names, folder=None):
"""
Rename (folders of) ``qp.Chain`` items by providing a mapping of old
to new keys.
Parameters
----------
names : dict
Maps existing names to the desired new ones, i.e.
{'old name': 'new name'} pairs need to be provided.
folder : str, default None
If a folder name is provided, new names will only be applied
within that folder. This is without effect if all ``qp.Chain.name``
properties across the items are unique.
Returns
-------
None
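Examples
--------
A hypothetical sketch (old and new names are made up):
>>> cm.rename({'Q1': 'Q1 (recoded)'})
>>> cm.rename({'gender': 'gender (rebased)'}, folder='demographics')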
"""
if not isinstance(names, dict):
err = "''names' must be a dict of old_name: new_name pairs."
raise ValueError(err)
if folder and not folder in self.folder_names:
err = "A folder named '{}' does not exist!".format(folder)
raise KeyError(err)
for old, new in list(names.items()):
no_folder_name = folder and not old in self._names(False)
no_name_across = not folder and not old in self._names(True)
if no_folder_name and no_name_across:
err = "'{}' is not an existing folder or ``qp.Chain`` name!"
raise KeyError(err.format(old))
else:
within_folder = old not in self._names(False)
if not within_folder:
idx = self._idx_from_name(old)
if not isinstance(self.__chains[idx], dict):
self.__chains[idx].name = new
else:
self.__chains[idx] = {new: self[old][:]}
else:
iter_over = self[folder] if folder else self.chains
for c in iter_over:
if c.name == old: c.name = new
return None
def _native_stat_names(self, idxvals_list, text_key=None):
"""
"""
if not text_key: text_key = 'en-GB'
replacements = {
'en-GB': {
'Weighted N': 'Base', # Crunch
'N': 'Base', # Crunch
'Mean': 'Mean', # Dims
'StdDev': 'Std. dev', # Dims
'StdErr': 'Std. err. of mean', # Dims
'SampleVar': 'Sample variance' # Dims
},
}
native_stat_names = []
for val in idxvals_list:
if val in replacements[text_key]:
native_stat_names.append(replacements[text_key][val])
else:
native_stat_names.append(val)
return native_stat_names
def _get_ykey_mapping(self):
ys = []
letters = string.ascii_uppercase + string.ascii_lowercase
for c in self.chains:
if c._y_keys not in ys:
ys.append(c._y_keys)
return list(zip(ys, letters))
def describe(self, by_folder=False, show_hidden=False):
"""
Get a structural summary of all ``qp.Chain`` instances found in self.
Parameters
----------
by_folder : bool, default False
If True, only information on ``dict``-structured (folder-like)
``qp.Chain`` items is shown, multiindexed by folder names and item
enumerations.
show_hidden : bool, default False
If True, the summary will also include elements that have been set
hidden using ``self.hide()``.
Returns
-------
summary : pd.DataFrame
An overview of all contained ``qp.Chain`` items (and folders).
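Examples
--------
A hypothetical sketch:
>>> overview = cm.describe(by_folder=True, show_hidden=False)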
"""
folders = []
folder_items = []
variables = []
names = []
array_sum = []
sources = []
banner_ids = []
item_pos = []
hidden = []
bannermap = self._get_ykey_mapping()
for pos, chains in enumerate(self):
is_folder = isinstance(chains, dict)
if is_folder:
folder_name = list(chains.keys())
chains = list(chains.values())[0]
folder_items.extend(list(range(0, len(chains))))
item_pos.extend([pos] * len(chains))
else:
chains = [chains]
folder_name = [None]
folder_items.append(None)
item_pos.append(pos)
if chains[0].structure is None:
variables.extend([c._x_keys[0] for c in chains])
names.extend([c.name for c in chains])
folders.extend(folder_name * len(chains))
array_sum.extend([True if c.array_style > -1 else False
for c in chains])
sources.extend(c.source if not c.edited else 'edited'
for c in chains)
for c in chains:
for m in bannermap:
if m[0] == c._y_keys: banner_ids.append(m[1])
else:
variables.extend([chains[0].name])
names.extend([chains[0].name])
folders.extend(folder_name)
array_sum.extend([False])
sources.extend(c.source for c in chains)
banner_ids.append(None)
for c in chains:
if c.hidden:
hidden.append(True)
else:
hidden.append(False)
df_data = [item_pos,
names,
folders,
folder_items,
variables,
sources,
banner_ids,
array_sum,
hidden]
df_cols = ['Position',
'Name',
'Folder',
'Item',
'Variable',
'Source',
'Banner id',
'Array',
'Hidden']
df = pd.DataFrame(df_data).T
df.columns = df_cols
if by_folder:
df = df.set_index(['Position', 'Folder', 'Item'])
if not show_hidden:
df = df[df['Hidden'] == False][df.columns[:-1]]
return df
def from_mtd(self, mtd_doc, ignore=None, paint=True, flatten=False):
"""
Convert a Dimensions table document (.mtd) into a collection of
quantipy.Chain representations.
Parameters
----------
mtd_doc : (pandified) .mtd
A Dimensions .mtd file or the returned result of ``pandify_mtd()``.
A "pandified" .mtd consists of ``dict`` of ``pandas.DataFrame``
and metadata ``dict``. Additional text here...
ignore : bool, default False
Text
paint : bool, default True
Text
flatten : bool, default False
Text
Returns
-------
self : quantipy.ChainManager
Will consist of Quantipy representations of the pandas-converted
.mtd file.
"""
def relabel_axes(df, meta, sigtested, labels=True):
"""
"""
for axis in ['x', 'y']:
if axis == 'x':
transf_axis = df.index
else:
transf_axis = df.columns
levels = transf_axis.nlevels
axis_meta = 'index-emetas' if axis == 'x' else 'columns-emetas'
for l in range(0, levels):
if not (sigtested and axis == 'y' and l == levels -1):
org_vals = transf_axis.get_level_values(l).tolist()
org_names = [ov.split('|')[0] for ov in org_vals]
org_labs = [ov.split('|')[1] for ov in org_vals]
new_vals = org_labs if labels else org_names
if l > 0:
for no, axmeta in enumerate(meta[axis_meta]):
if axmeta['Type'] != 'Category':
new_vals[no] = axmeta['Type']
new_vals = self._native_stat_names(new_vals)
rename_dict = {old: new for old, new in zip(org_vals, new_vals)}
if axis == 'x':
df.rename(index=rename_dict, inplace=True)
df.index.names = ['Question', 'Values'] * (levels // 2)
else:
df.rename(columns=rename_dict, inplace=True)
if sigtested:
df.columns.names = (['Question', 'Values'] * (levels // 2) +
['Test-IDs'])
else:
df.columns.names = ['Question', 'Values'] * (levels // 2)
return None
def split_tab(tab):
"""
"""
df, meta = tab['df'], tab['tmeta']
mtd_slicer = df.index.get_level_values(0)
meta_limits = list(OrderedDict(
(i, mtd_slicer.tolist().count(i)) for i in mtd_slicer).values())
meta_slices = []
for start, end in enumerate(meta_limits):
if start == 0:
i_0 = 0
else:
i_0 = meta_limits[start-1]
meta_slices.append((i_0, end))
df_slicers = []
for e in mtd_slicer:
if not e in df_slicers:
df_slicers.append(e)
dfs = [df.loc[[s], :].copy() for s in df_slicers]
sub_metas = []
for ms in meta_slices:
all_meta = copy.deepcopy(meta)
idx_meta = all_meta['index-emetas'][ms[0]: ms[1]]
all_meta['index-emetas'] = idx_meta
sub_metas.append(all_meta)
return list(zip(dfs, sub_metas))
def _get_axis_vars(df):
axis_vars = []
for axis in [df.index, df.columns]:
ax_var = [v.split('|')[0] for v in axis.unique().levels[0]]
axis_vars.append(ax_var)
return axis_vars[0][0], axis_vars[1]
def to_chain(basic_chain_defintion, add_chain_meta):
new_chain = Chain(None, basic_chain_defintion[1])
new_chain.source = 'Dimensions MTD'
new_chain.stack = None
new_chain.painted = True
new_chain._meta = add_chain_meta
new_chain._frame = basic_chain_defintion[0]
new_chain._x_keys = [basic_chain_defintion[1]]
new_chain._y_keys = basic_chain_defintion[2]
new_chain._given_views = None
new_chain._grp_text_map = []
new_chain._text_map = None
# new_chain._pad_id = None
# new_chain._array_style = None
new_chain._has_rules = False
# new_chain.double_base = False
# new_chain.sig_test_letters = None
# new_chain.totalize = True
# new_chain._meta['var_meta'] = basic_chain_defintion[-1]
# new_chain._extract_base_descriptions()
new_chain._views = OrderedDict()
new_chain._views_per_rows()
for vk in new_chain._views_per_rows():
if not vk in new_chain._views:
new_chain._views[vk] = new_chain._views_per_rows().count(vk)
return new_chain
def mine_mtd(tab_collection, paint, chain_coll, folder=None):
failed = []
unsupported = []
for name, sub_tab in list(tab_collection.items()):
try:
if isinstance(list(sub_tab.values())[0], dict):
mine_mtd(sub_tab, paint, chain_coll, name)
else:
tabs = split_tab(sub_tab)
chain_dfs = []
for tab in tabs:
df, meta = tab[0], tab[1]
nested_x = None
nested_y = (df.columns.nlevels % 2 == 0
and df.columns.nlevels > 2)
sigtested = (df.columns.nlevels % 2 != 0
and df.columns.nlevels > 2)
if sigtested:
df = df.swaplevel(0, axis=1).swaplevel(0, 1, 1)
else:
invalid = ['-', '*', '**']
df = df.applymap(
lambda x: float(x.replace(',', '.').replace('%', ''))
if isinstance(x, str) and not x in invalid
else x
)
x, y = _get_axis_vars(df)
df.replace('-', np.NaN, inplace=True)
relabel_axes(df, meta, sigtested, labels=paint)
colbase_l = -2 if sigtested else -1
for base in ['Base', 'UnweightedBase']:
df = df.drop(base, axis=1, level=colbase_l)
chain = to_chain((df, x, y), meta)
chain.name = name
chain_dfs.append(chain)
if not folder:
chain_coll.extend(chain_dfs)
else:
folders = [(i, list(c.keys())[0]) for i, c in
enumerate(chain_coll, 0) if
isinstance(c, dict)]
if folder in [f[1] for f in folders]:
pos = [f[0] for f in folders
if f[1] == folder][0]
chain_coll[pos][folder].extend(chain_dfs)
else:
chain_coll.append({folder: chain_dfs})
except Exception:
failed.append(name)
return chain_coll
chain_coll = []
chains = mine_mtd(mtd_doc, paint, chain_coll)
self.__chains = chains
return self
def from_cmt(self, crunch_tabbook, ignore=None, cell_items='c',
array_summaries=True):
"""
Convert a Crunch multitable document (tabbook) into a collection of
quantipy.Chain representations.
Parameters
----------
crunch_tabbook : ``Tabbook`` object instance
Text
ignore : bool, default False
Text
cell_items : {'c', 'p', 'cp'}, default 'c'
Text
array_summaries : bool, default True
Text
Returns
-------
self : quantipy.ChainManager
Will consist of Quantipy representations of the Crunch table
document.
"""
def cubegroups_to_chain_defs(cubegroups, ci, arr_sum):
"""
Convert CubeGroup DataFrame to a Chain.dataframe.
"""
chain_dfs = []
# DataFrame edits to get basic Chain.dataframe rep.
for idx, cubegroup in enumerate(cubegroups):
cubegroup_df = cubegroup.dataframe
array = cubegroup.is_array
# split arrays into separate dfs / convert to summary df...
if array:
ai_aliases = cubegroup.subref_aliases
array_elements = []
dfs = []
if array_summaries:
arr_sum_df = cubegroup_df.copy().unstack()['All']
arr_sum_df.is_summary = True
x_label = arr_sum_df.index.get_level_values(0).tolist()[0]
x_name = cubegroup.rowdim.alias
dfs.append((arr_sum_df, x_label, x_name))
array_elements = cubegroup_df.index.levels[1].values.tolist()
ai_df = cubegroup_df.copy()
idx = cubegroup_df.index.droplevel(0)
ai_df.index = idx
for array_element, alias in zip(array_elements, ai_aliases):
dfs.append((ai_df.loc[[array_element], :].copy(),
array_element, alias))
else:
x_label = cubegroup_df.index.get_level_values(0).tolist()[0]
x_name = cubegroup.rowdim.alias
dfs = [(cubegroup_df, x_label, x_name)]
# Apply QP-style DataFrame conventions (indexing, names, etc.)
for cgdf, x_var_label, x_var_name in dfs:
is_summary = hasattr(cgdf, 'is_summary')
if is_summary:
cgdf = cgdf.T
y_var_names = ['@']
x_names = ['Question', 'Values']
y_names = ['Array', 'Questions']
else:
y_var_names = cubegroup.colvars
x_names = ['Question', 'Values']
y_names = ['Question', 'Values']
cgdf.index = cgdf.index.droplevel(0)
# Compute percentages?
if cell_items == 'p': _calc_pct(cgdf)
# Build x-axis multiindex / rearrange "Base" row
idx_vals = cgdf.index.values.tolist()
cgdf = cgdf.reindex([idx_vals[-1]] + idx_vals[:-1])
idx_vals = cgdf.index.values.tolist()
mi_vals = [[x_var_label], self._native_stat_names(idx_vals)]
row_mi = pd.MultiIndex.from_product(mi_vals, names=x_names)
cgdf.index = row_mi
# Build y-axis multiindex
y_vals = [('Total', 'Total') if y[0] == 'All'
else y for y in cgdf.columns.tolist()]
col_mi = pd.MultiIndex.from_tuples(y_vals, names=y_names)
cgdf.columns = col_mi
if is_summary:
cgdf = cgdf.T
chain_dfs.append((cgdf, x_var_name, y_var_names, cubegroup._meta))
return chain_dfs
def _calc_pct(df):
df.iloc[:-1, :] = df.iloc[:-1, :].div(df.iloc[-1, :]) * 100
return None
def to_chain(basic_chain_defintion, add_chain_meta):
"""
"""
new_chain = Chain(None, basic_chain_defintion[1])
new_chain.source = 'Crunch multitable'
new_chain.stack = None
new_chain.painted = True
new_chain._meta = add_chain_meta
new_chain._frame = basic_chain_defintion[0]
new_chain._x_keys = [basic_chain_defintion[1]]
new_chain._y_keys = basic_chain_defintion[2]
new_chain._given_views = None
new_chain._grp_text_map = []
new_chain._text_map = None
new_chain._pad_id = None
new_chain._array_style = None
new_chain._has_rules = False
new_chain.double_base = False
new_chain.sig_test_letters = None
new_chain.totalize = True
new_chain._meta['var_meta'] = basic_chain_defintion[-1]
new_chain._extract_base_descriptions()
new_chain._views = OrderedDict()
for vk in new_chain._views_per_rows():
if not vk in new_chain._views:
new_chain._views[vk] = new_chain._views_per_rows().count(vk)
return new_chain
# self.name = name OK!
# self._meta = Crunch meta OK!
# self._x_keys = None OK!
# self._y_keys = None OK!
# self._frame = None OK!
# self.totalize = False OK! -> But is True!
# self.stack = stack OK! -> N/A
# self._has_rules = None OK! -> N/A
# self.double_base = False OK! -> N/A
# self.sig_test_letters = None OK! -> N/A
# self._pad_id = None OK! -> N/A
# self._given_views = None OK! -> N/A
# self._grp_text_map = [] OK! -> N/A
# self._text_map = None OK! -> N/A
# self.grouping = None ?
# self._group_style = None ?
# self._transl = qp.core.view.View._metric_name_map() * with CMT/MTD
self.source = 'Crunch multitable'
cubegroups = crunch_tabbook.cube_groups
meta = {'display_settings': crunch_tabbook.display_settings,
'weight': crunch_tabbook.weight}
if cell_items == 'c':
meta['display_settings']['countsOrPercents'] = 'counts'
elif cell_items == 'p':
meta['display_settings']['countsOrPercents'] = 'percent'
chain_defs = cubegroups_to_chain_defs(cubegroups, cell_items,
array_summaries)
self.__chains = [to_chain(c_def, meta) for c_def in chain_defs]
return self
# ------------------------------------------------------------------------
def from_cluster(self, clusters):
"""
Create an OrderedDict of ``Cluster`` names storing new ``Chain`` items.
Parameters
----------
clusters : cluster-like ([dict of] quantipy.Cluster)
Text ...
Returns
-------
new_chain_dict : OrderedDict
Text ...
"""
self.source = 'native (old qp.Cluster of qp.Chain)'
qp.set_option('new_chains', True)
def check_cell_items(views):
c = any('counts' in view.split('|')[-1] for view in views)
p = any('c%' in view.split('|')[-1] for view in views)
cp = c and p
if cp:
cell_items = 'counts_colpct'
else:
cell_items = 'counts' if c else 'colpct'
return cell_items
def check_sigtest(views):
"""
"""
levels = []
sigs = [v.split('|')[1] for v in views if v.split('|')[1].startswith('t.')]
for sig in sigs:
l = '0.{}'.format(sig.split('.')[-1])
if not l in levels: levels.append(l)
return levels
def mine_chain_structure(clusters):
cluster_defs = []
for cluster_def_name, cluster in list(clusters.items()):
for name in cluster:
if isinstance(list(cluster[name].items())[0][1], pd.DataFrame):
cluster_def = {'name': name,
'oe': True,
'df': list(cluster[name].items())[0][1],
'filter': chain.filter,
'data_key': chain.data_key}
else:
xs, views, weight = [], [], []
for chain_name, chain in list(cluster[name].items()):
for v in chain.views:
w = v.split('|')[-2]
if w not in weight: weight.append(w)
if v not in views: views.append(v)
xs.append(chain.source_name)
ys = chain.content_of_axis
cluster_def = {'name': '{}-{}'.format(cluster_def_name, name),
'filter': chain.filter,
'data_key': chain.data_key,
'xs': xs,
'ys': ys,
'views': views,
'weight': weight[-1],
'bases': 'both' if len(weight) == 2 else 'auto',
'cell_items': check_cell_items(views),
'tests': check_sigtest(views)}
cluster_defs.append(cluster_def)
return cluster_defs
from quantipy.core.view_generators.view_specs import ViewManager
cluster_specs = mine_chain_structure(clusters)
for cluster_spec in cluster_specs:
oe = cluster_spec.get('oe', False)
if not oe:
vm = ViewManager(self.stack)
vm.get_views(cell_items=cluster_spec['cell_items'],
weight=cluster_spec['weight'],
bases=cluster_spec['bases'],
stats= ['mean', 'stddev', 'median', 'min', 'max'],
tests=cluster_spec['tests'])
self.get(data_key=cluster_spec['data_key'],
filter_key=cluster_spec['filter'],
x_keys = cluster_spec['xs'],
y_keys = cluster_spec['ys'],
views=vm.views,
orient='x',
prioritize=True)
else:
meta = [cluster_spec['data_key'], cluster_spec['filter']]
df, name = cluster_spec['df'], cluster_spec['name']
self.add(df, meta_from=meta, name=name)
return None
@staticmethod
def _force_list(obj):
if isinstance(obj, (list, tuple)):
return obj
return [obj]
def _check_keys(self, data_key, keys):
""" Checks given keys exist in meta['columns']
"""
keys = self._force_list(keys)
meta = self.stack[data_key].meta
valid = list(meta['columns'].keys()) + list(meta['masks'].keys())
invalid = ['"%s"' % _ for _ in keys if _ not in valid and _ != _TOTAL]
if invalid:
raise ValueError("Keys %s do not exist in meta['columns'] or "
"meta['masks']." % ", ".join(invalid))
return keys
def add(self, structure, meta_from=None, meta=None, name=None):
""" Add a pandas.DataFrame as a Chain.
Parameters
----------
structure : ``pandas.Dataframe``
The dataframe to add to the ChainManager
meta_from : list, list-like, str, default None
The location of the meta in the stack. Either a list-like object with data key and
filter key or a str as the data key
meta : quantipy meta (dict)
External meta used to paint the frame
name : ``str``, default None
The name to give the resulting chain. If not passed, the name will become
the concatenated column names, delimited by a period
Returns
-------
appended : ``quantipy.ChainManager``
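Examples
--------
A hypothetical sketch (the DataFrame content and meta references are
made up):
>>> import pandas as pd
>>> verbatims = pd.DataFrame({'open ends': ['some text', 'more text']})
>>> cm.add(verbatims, meta_from=['my data key', 'no_filter'], name='open ends')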
"""
name = name or '.'.join(structure.columns.tolist())
chain = Chain(self.stack, name, structure=structure)
chain._frame = chain.structure
chain._index = chain._frame.index
chain._columns = chain._frame.columns
chain._frame_values = chain._frame.values
if meta_from:
if isinstance(meta_from, str):
chain._meta = self.stack[meta_from].meta
else:
data_key, filter_key = meta_from
chain._meta = self.stack[data_key][filter_key].meta
elif meta:
chain._meta = meta
self.__chains.append(chain)
return self
def get(self, data_key, filter_key, x_keys, y_keys, views, orient='x',
rules=True, rules_weight=None, prioritize=True, folder=None):
"""
TODO: Full doc string
Get a (list of) Chain instance(s) in either 'x' or 'y' orientation.
Chain.dfs will be concatenated along the provided 'orient'-axis.
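A hypothetical sketch (assumes a populated ``qp.Stack`` behind ``cm``; the
data key, filter, variable and view keys below are made up):
>>> cm.get('mydata', 'no_filter', x_keys=['q1', 'q2'],
...         y_keys=['@', 'gender'], views=['x|f|:|||counts'],
...         orient='x', folder='block A')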
"""
# TODO: VERIFY data_key
# TODO: VERIFY filter_key
# TODO: Add verbose arg to get()
x_keys = self._check_keys(data_key, x_keys)
y_keys = self._check_keys(data_key, y_keys)
if folder and not isinstance(folder, str):
err = "'folder' must be a name provided as string!"
raise ValueError(err)
if orient == 'x':
it, keys = x_keys, y_keys
else:
it, keys = y_keys, x_keys
for key in it:
x_key, y_key = (key, keys) if orient == 'x' else (keys, key)
chain = Chain(self.stack, key)
chain = chain.get(data_key, filter_key, self._force_list(x_key),
self._force_list(y_key), views, rules=rules,
rules_weight=rules_weight, prioritize=prioritize,
orient=orient)
folders = self.folder_names
if folder in folders:
idx = self._idx_from_name(folder)
self.__chains[idx][folder].append(chain)
else:
if folder:
self.__chains.append({folder: [chain]})
else:
self.__chains.append(chain)
return None
def paint_all(self, *args, **kwargs):
"""
Apply labels, sig. testing conversion and other post-processing to the
``Chain.dataframe`` property.
Use this to prepare a ``Chain`` for further usage in an Excel or
PowerPoint Build.
Parameters
----------
text_key : str, default meta['lib']['default text']
The language version of any variable metadata applied.
text_loc_x : str, default None
The key in the 'text' to locate the text_key for the
``pandas.DataFrame.index`` labels
text_loc_y : str, default None
The key in the 'text' to locate the text_key for the
``pandas.DataFrame.columns`` labels
display : {'x', 'y', ['x', 'y']}, default None
Text
axes : {'x', 'y', ['x', 'y']}, default None
Text
view_level : bool, default False
Text
transform_tests : {False, 'full', 'cells'}, default cells
Text
totalize : bool, default False
Text
Returns
-------
None
The ``.dataframe`` is modified inplace.
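Examples
--------
A hypothetical sketch (the argument values are made up):
>>> cm.paint_all(text_key='en-GB', transform_tests='cells')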
"""
for chain in self:
if isinstance(chain, dict):
for c in list(chain.values())[0]:
c.paint(*args, **kwargs)
else:
chain.paint(*args, **kwargs)
return None
HEADERS = ['header-title',
'header-left',
'header-center',
'header-right']
FOOTERS = ['footer-title',
'footer-left',
'footer-center',
'footer-right']
VALID_ANNOT_TYPES = HEADERS + FOOTERS + ['notes']
VALID_ANNOT_CATS = ['header', 'footer', 'notes']
VALID_ANNOT_POS = ['title',
'left',
'center',
'right']
class ChainAnnotations(dict):
def __init__(self):
super(ChainAnnotations, self).__init__()
self.header_title = []
self.header_left = []
self.header_center = []
self.header_right = []
self.footer_title = []
self.footer_left = []
self.footer_center = []
self.footer_right = []
self.notes = []
for v in VALID_ANNOT_TYPES:
self[v] = []
def __setitem__(self, key, value):
self._test_valid_key(key)
return super(ChainAnnotations, self).__setitem__(key, value)
def __getitem__(self, key):
self._test_valid_key(key)
return super(ChainAnnotations, self).__getitem__(key)
def __repr__(self):
headers = [(h.split('-')[1], self[h]) for h in self.populated if
h.split('-')[0] == 'header']
footers = [(f.split('-')[1], self[f]) for f in self.populated if
f.split('-')[0] == 'footer']
notes = self['notes'] if self['notes'] else []
if notes:
ar = 'Notes\n'
ar += '-{:>16}\n'.format(str(notes))
else:
ar = 'Notes: None\n'
if headers:
ar += 'Headers\n'
for pos, text in list(dict(headers).items()):
ar += ' {:>5}: {:>5}\n'.format(str(pos), str(text))
else:
ar += 'Headers: None\n'
if footers:
ar += 'Footers\n'
for pos, text in list(dict(footers).items()):
ar += ' {:>5}: {:>5}\n'.format(str(pos), str(text))
else:
ar += 'Footers: None'
return ar
def _test_valid_key(self, key):
"""
"""
if key not in VALID_ANNOT_TYPES:
splitted = key.split('-')
if len(splitted) > 1:
acat, apos = splitted[0], splitted[1]
else:
acat, apos = key, None
if apos:
if acat == 'notes':
msg = "'{}' annotation type does not support positions!"
msg = msg.format(acat)
elif not acat in VALID_ANNOT_CATS and not apos in VALID_ANNOT_POS:
msg = "'{}' is not a valid annotation type!".format(key)
elif acat not in VALID_ANNOT_CATS:
msg = "'{}' is not a valid annotation category!".format(acat)
elif apos not in VALID_ANNOT_POS:
msg = "'{}' is not a valid annotation position!".format(apos)
else:
msg = "'{}' is not a valid annotation type!".format(key)
raise KeyError(msg)
@property
def header(self):
h_dict = {}
for h in HEADERS:
if self[h]: h_dict[h.split('-')[1]] = self[h]
return h_dict
@property
def footer(self):
f_dict = {}
for f in FOOTERS:
if self[f]: f_dict[f.split('-')[1]] = self[f]
return f_dict
@property
def populated(self):
"""
The annotation fields that are defined.
"""
return sorted([k for k, v in list(self.items()) if v])
@staticmethod
def _annot_key(a_type, a_pos):
if a_pos:
return '{}-{}'.format(a_type, a_pos)
else:
return a_type
def set(self, text, category='header', position='title'):
"""
Add annotation texts defined by their category and position.
Parameters
----------
text : str
The annotation text to add.
category : {'header', 'footer', 'notes'}, default 'header'
Defines if the annotation is treated as a *header*, *footer* or
*note*.
position : {'title', 'left', 'center', 'right'}, default 'title'
Sets the placement of the annotation within its category.
Returns
-------
None
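Examples
--------
A small, self-contained sketch (the annotation texts are made up):
>>> annot = ChainAnnotations()
>>> annot.set('Base: all respondents', category='footer', position='left')
>>> annot.set('Fieldwork 01-15 Jan', category='notes')
>>> annot.populated
['footer-left', 'notes']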
"""
if not category: category = 'header'
if not position and category != 'notes': position = 'title'
if category == 'notes': position = None
akey = self._annot_key(category, position)
self[akey].append(text)
self.__dict__[akey.replace('-', '_')].append(text)
return None
CELL_DETAILS = {'en-GB': {'cc': 'Cell Contents',
'N': 'Counts',
'c%': 'Column Percentages',
'r%': 'Row Percentages',
'str': 'Statistical Test Results',
'cp': 'Column Proportions',
'cm': 'Means',
'stats': 'Statistics',
'mb': 'Minimum Base',
'sb': 'Small Base',
'up': ' indicates result is significantly higher than the result in the Total column',
'down': ' indicates result is significantly lower than the result in the Total column'
},
'fr-FR': {'cc': 'Contenu cellule',
'N': 'Total',
'c%': 'Pourcentage de colonne',
'r%': 'Pourcentage de ligne',
'str': 'Résultats test statistique',
'cp': 'Proportions de colonne',
'cm': 'Moyennes de colonne',
'stats': 'Statistiques',
'mb': 'Base minimum',
'sb': 'Petite base',
'up': ' indique que le résultat est significativement supérieur au résultat de la colonne Total',
'down': ' indique que le résultat est significativement inférieur au résultat de la colonne Total'
}}
class Chain(object):
def __init__(self, stack, name, structure=None):
self.stack = stack
self.name = name
self.structure = structure
self.source = 'native'
self.edited = False
self._custom_views = None
self.double_base = False
self.grouping = None
self.sig_test_letters = None
self.totalize = False
self.base_descriptions = None
self.painted = False
self.hidden = False
self.annotations = ChainAnnotations()
self._array_style = None
self._group_style = None
self._meta = None
self._x_keys = None
self._y_keys = None
self._given_views = None
self._grp_text_map = []
self._text_map = None
self._custom_texts = {}
self._transl = qp.core.view.View._metric_name_map()
self._pad_id = None
self._frame = None
self._has_rules = None
self._flag_bases = None
self._is_mask_item = False
self._shapes = None
class _TransformedChainDF(object):
"""
"""
def __init__(self, chain):
c = chain.clone()
self.org_views = c.views
self.df = c._frame
self._org_idx = self.df.index
self._edit_idx = list(range(0, len(self._org_idx)))
self._idx_valmap = {n: o for n, o in
zip(self._edit_idx,
self._org_idx.get_level_values(1))}
self.df.index = self._edit_idx
self._org_col = self.df.columns
self._edit_col = list(range(0, len(self._org_col)))
self._col_valmap = {n: o for n, o in
zip(self._edit_col,
self._org_col.get_level_values(1))}
self.df.columns = self._edit_col
self.array_mi = c._array_style == 0
self.nested_y = c._nested_y
self._nest_mul = self._nesting_multiplier()
return None
def _nesting_multiplier(self):
"""
"""
levels = self._org_col.nlevels
if levels == 2:
return 1
else:
return (levels // 2) + 1
def _insert_viewlikes(self, new_index_flat, org_index_mapped):
inserts = [new_index_flat.index(val) for val in new_index_flat
if not val in list(org_index_mapped.values())]
flatviews = []
for name, no in list(self.org_views.items()):
e = [name] * no
flatviews.extend(e)
for vno, i in enumerate(inserts):
flatviews.insert(i, '__viewlike__{}'.format(vno))
new_views = OrderedDict()
no_of_views = Counter(flatviews)
for fv in flatviews:
if not fv in new_views: new_views[fv] = no_of_views[fv]
return new_views
def _updated_index_tuples(self, axis):
"""
"""
if axis == 1:
current = self.df.columns.values.tolist()
mapped = self._col_valmap
org_tuples = self._org_col.tolist()
else:
current = self.df.index.values.tolist()
mapped = self._idx_valmap
org_tuples = self._org_idx.tolist()
merged = [mapped[val] if val in mapped else val for val in current]
# ================================================================
if (self.array_mi and axis == 1) or axis == 0:
self._transf_views = self._insert_viewlikes(merged, mapped)
else:
self._transf_views = self.org_views
# ================================================================
i = d = 0
new_tuples = []
for merged_val in merged:
idx = i-d if i-d != len(org_tuples) else i-d-1
if org_tuples[idx][1] == merged_val:
new_tuples.append(org_tuples[idx])
else:
empties = ['*'] * self._nest_mul
new_tuple = tuple(empties + [merged_val])
new_tuples.append(new_tuple)
d += 1
i += 1
return new_tuples
def _reindex(self):
"""
"""
y_names = ['Question', 'Values']
if not self.array_mi:
x_names = y_names
else:
x_names = ['Array', 'Questions']
if self.nested_y: y_names = y_names * (self._nest_mul - 1)
tuples = self._updated_index_tuples(axis=1)
self.df.columns = pd.MultiIndex.from_tuples(tuples, names=y_names)
tuples = self._updated_index_tuples(axis=0)
self.df.index = pd.MultiIndex.from_tuples(tuples, names=x_names)
return None
def export(self):
"""
"""
return self._TransformedChainDF(self)
def assign(self, transformed_chain_df):
"""
"""
if not isinstance(transformed_chain_df, self._TransformedChainDF):
raise ValueError("Must pass an exported ``Chain`` instance!")
transformed_chain_df._reindex()
self._frame = transformed_chain_df.df
self.views = transformed_chain_df._transf_views
return None
def __str__(self):
if self.structure is not None:
return '%s...\n%s' % (self.__class__.__name__, str(self.structure.head()))
str_format = ('%s...'
'\nSource: %s'
'\nName: %s'
'\nOrientation: %s'
'\nX: %s'
'\nY: %s'
'\nNumber of views: %s')
return str_format % (self.__class__.__name__,
getattr(self, 'source', 'native'),
getattr(self, 'name', 'None'),
getattr(self, 'orientation', 'None'),
getattr(self, '_x_keys', 'None'),
getattr(self, '_y_keys', 'None'),
getattr(self, 'views', 'None'))
def __repr__(self):
return self.__str__()
def __len__(self):
"""Returns the total number of cells in the Chain.dataframe"""
return (len(getattr(self, 'index', [])) * len(getattr(self, 'columns', [])))
def clone(self):
"""
"""
return copy.deepcopy(self)
@lazy_property
def _default_text(self):
tk = self._meta['lib']['default text']
if tk not in self._transl:
self._transl[tk] = self._transl['en-GB']
return tk
@lazy_property
def orientation(self):
""" TODO: doc string
"""
if len(self._x_keys) == 1 and len(self._y_keys) == 1:
return 'x'
elif len(self._x_keys) == 1:
return 'x'
elif len(self._y_keys) == 1:
return 'y'
if len(self._x_keys) > 1 and len(self._y_keys) > 1:
return None
@lazy_property
def axis(self):
# TODO: name appropriate?
return int(self.orientation=='x')
@lazy_property
def axes(self):
# TODO: name appropriate?
if self.axis == 1:
return self._x_keys, self._y_keys
return self._y_keys, self._x_keys
@property
def dataframe(self):
return self._frame
@property
def index(self):
return self._index
@index.setter
def index(self, index):
self._index = index
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, columns):
self._columns = columns
@property
def frame_values(self):
return self._frame_values
@frame_values.setter
def frame_values(self, frame_values):
self._frame_values = frame_values
@property
def views(self):
return self._views
@views.setter
def views(self, views):
self._views = views
@property
def array_style(self):
return self._array_style
@property
def shapes(self):
if self._shapes is None:
self._shapes = []
return self._shapes
@array_style.setter
def array_style(self, link):
array_style = -1
for view in list(link.keys()):
if link[view].meta()['x']['is_array']:
array_style = 0
if link[view].meta()['y']['is_array']:
array_style = 1
self._array_style = array_style
@property
def pad_id(self):
if self._pad_id is None:
self._pad_id = 0
else:
self._pad_id += 1
return self._pad_id
@property
def sig_levels(self):
sigs = set([v for v in self._valid_views(True)
if v.split('|')[1].startswith('t.')])
tests = [t.split('|')[1].split('.')[1] for t in sigs]
levels = [t.split('|')[1].split('.')[3] for t in sigs]
sig_levels = {}
for m in zip(tests, levels):
l = '.{}'.format(m[1])
t = m[0]
if t in sig_levels:
sig_levels[t].append(l)
else:
sig_levels[t] = [l]
return sig_levels
@property
def cell_items(self):
if self.views:
compl_views = [v for v in self.views if ']*:' in v]
check_views = compl_views[:] or list(self.views.keys())
# iterate over a static copy so removing items does not disturb the loop
for v in check_views[:]:
if v.startswith('__viewlike__'):
check_views.remove(v)
non_freqs = ('d.', 't.')
c = any(v.split('|')[3] == '' and
not v.split('|')[1].startswith(non_freqs) and
not v.split('|')[-1].startswith('cbase')
for v in check_views)
col_pct = any(v.split('|')[3] == 'y' and
not v.split('|')[1].startswith(non_freqs) and
not v.split('|')[-1].startswith('cbase')
for v in check_views)
row_pct = any(v.split('|')[3] == 'x' and
not v.split('|')[1].startswith(non_freqs) and
not v.split('|')[-1].startswith('cbase')
for v in check_views)
c_colpct = c and col_pct
c_rowpct = c and row_pct
c_colrow_pct = c_colpct and c_rowpct
single_ci = not (c_colrow_pct or c_colpct or c_rowpct)
if single_ci:
if c:
return 'counts'
elif col_pct:
return 'colpct'
else:
return 'rowpct'
else:
if c_colrow_pct:
return 'counts_colpct_rowpct'
elif c_colpct:
if self._counts_first():
return 'counts_colpct'
else:
return 'colpct_counts'
else:
return 'counts_rowpct'
@property
def _ci_simple(self):
ci = []
if self.views:
for v in self.views:
if 'significance' in v:
continue
if ']*:' in v:
if v.split('|')[3] == '':
if 'N' not in ci:
ci.append('N')
if v.split('|')[3] == 'y':
if 'c%' not in ci:
ci.append('c%')
if v.split('|')[3] == 'x':
if 'r%' not in ci:
ci.append('r%')
else:
if v.split('|')[-1] == 'counts':
if 'N' not in ci:
ci.append('N')
elif v.split('|')[-1] == 'c%':
if 'c%' not in ci:
ci.append('c%')
elif v.split('|')[-1] == 'r%':
if 'r%' not in ci:
ci.append('r%')
return ci
@property
def ci_count(self):
return len(self.cell_items.split('_'))
@property
def contents(self):
if self.structure is not None:
return
nested = self._array_style == 0
if nested:
dims = self._frame.shape
contents = {row: {col: {} for col in range(0, dims[1])}
for row in range(0, dims[0])}
else:
contents = dict()
for row, idx in enumerate(self._views_per_rows()):
if nested:
for i, v in list(idx.items()):
contents[row][i] = self._add_contents(v)
else:
contents[row] = self._add_contents(idx)
return contents
@property
def cell_details(self):
lang = self._default_text if self._default_text == 'fr-FR' else 'en-GB'
cd = CELL_DETAILS[lang]
ci = self.cell_items
cd_str = '%s (%s)' % (cd['cc'], ', '.join([cd[_] for _ in self._ci_simple]))
against_total = False
if self.sig_test_letters:
mapped = ''
group = None
i = 0 if (self._frame.columns.nlevels in [2, 3]) else 4
for letter, lab in zip(self.sig_test_letters, self._frame.columns.codes[-i]):
if letter == '@':
continue
if group is not None:
if lab == group:
mapped += '/' + letter
else:
group = lab
mapped += ', ' + letter
else:
group = lab
mapped += letter
test_types = cd['cp']
if self.sig_levels.get('means'):
test_types += ', ' + cd['cm']
levels = []
for key in ('props', 'means'):
for level in self.sig_levels.get(key, iter(())):
l = '%s%%' % int(100. - float(level.split('+@')[0].split('.')[1]))
if l not in levels:
levels.append(l)
if '+@' in level:
against_total = True
cd_str = cd_str[:-1] + ', ' + cd['str'] +'), '
cd_str += '%s (%s, (%s): %s' % (cd['stats'], test_types, ', '.join(levels), mapped)
if self._flag_bases:
flags = ([], [])
[(flags[0].append(min), flags[1].append(small)) for min, small in self._flag_bases]
cd_str += ', %s: %s (**), %s: %s (*)' % (cd['mb'], ', '.join(map(str, flags[0])),
cd['sb'], ', '.join(map(str, flags[1])))
cd_str += ')'
cd_str = [cd_str]
if against_total:
cd_str.extend([cd['up'], cd['down']])
return cd_str
def describe(self):
def _describe(cell_defs, row_id):
descr = []
for r, m in list(cell_defs.items()):
descr.append(
[k if isinstance(v, bool) else v for k, v in list(m.items()) if v])
if any('is_block' in d for d in descr):
blocks = self._describe_block(descr, row_id)
calc = 'calc' in blocks
for d, b in zip(descr, blocks):
if b:
d.append(b) if not calc else d.extend([b, 'has_calc'])
return descr
if self._array_style == 0:
description = {k: _describe(v, k) for k, v in list(self.contents.items())}
else:
description = _describe(self.contents, None)
return description
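# Example (illustrative sketch): on a simple counts/c% Chain, ``describe()`` yields
# one list of content tags per result row, e.g.
#   [['is_c_base', 'is_weighted', 'weight_a'],
#    ['is_counts', 'is_weighted', 'weight_a'], ...]
# where 'weight_a' is an assumed weight variable name.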
def _fill_cells(self):
"""
"""
self._frame = self._frame.fillna(method='ffill')
return None
# @lazy_property
def _counts_first(self):
for v in self.views:
sname = v.split('|')[-1]
if sname in ['counts', 'c%']:
if sname == 'counts':
return True
else:
return False
#@property
def _views_per_rows(self):
"""
"""
base_vk = 'x|f|x:||{}|cbase'
counts_vk = 'x|f|:||{}|counts'
pct_vk = 'x|f|:|y|{}|c%'
mean_vk = 'x|d.mean|:|y|{}|mean'
stddev_vk = 'x|d.stddev|:|y|{}|stddev'
variance_vk = 'x|d.var|:|y|{}|var'
sem_vk = 'x|d.sem|:|y|{}|sem'
if self.source == 'Crunch multitable':
ci = self._meta['display_settings']['countsOrPercents']
w = self._meta['weight']
if ci == 'counts':
main_vk = counts_vk.format(w if w else '')
else:
main_vk = pct_vk.format(w if w else '')
base_vk = base_vk.format(w if w else '')
metrics = [base_vk] + (len(self.dataframe.index)-1) * [main_vk]
elif self.source == 'Dimensions MTD':
ci = self._meta['cell_items']
w = None
axis_vals = [axv['Type'] for axv in self._meta['index-emetas']]
metrics = []
for axis_val in axis_vals:
if axis_val == 'Base':
metrics.append(base_vk.format(w if w else ''))
if axis_val == 'UnweightedBase':
metrics.append(base_vk.format(w if w else ''))
elif axis_val == 'Category':
metrics.append(counts_vk.format(w if w else ''))
elif axis_val == 'Mean':
metrics.append(mean_vk.format(w if w else ''))
elif axis_val == 'StdDev':
metrics.append(stddev_vk.format(w if w else ''))
elif axis_val == 'StdErr':
metrics.append(sem_vk.format(w if w else ''))
elif axis_val == 'SampleVar':
metrics.append(variance_vk.format(w if w else ''))
return metrics
else:
# Native Chain views
# ----------------------------------------------------------------
if self.edited and (self._custom_views and not self.array_style == 0):
return self._custom_views
else:
if self._array_style != 0:
metrics = []
if self.orientation == 'x':
for view in self._valid_views():
view = self._force_list(view)
initial = view[0]
size = self.views[initial]
metrics.extend(view * size)
else:
for view_part in self.views:
for view in self._valid_views():
view = self._force_list(view)
initial = view[0]
size = view_part[initial]
metrics.extend(view * size)
else:
counts = []
colpcts = []
rowpcts = []
metrics = []
ci = self.cell_items
for v in list(self.views.keys()):
if not v.startswith('__viewlike__'):
parts = v.split('|')
is_completed = ']*:' in v
if not self._is_c_pct(parts):
counts.extend([v]*self.views[v])
if self._is_r_pct(parts):
rowpcts.extend([v]*self.views[v])
if (self._is_c_pct(parts) or self._is_base(parts) or
self._is_stat(parts)):
colpcts.extend([v]*self.views[v])
else:
counts = counts + ['__viewlike__']
colpcts = colpcts + ['__viewlike__']
rowpcts = rowpcts + ['__viewlike__']
dims = self._frame.shape
for row in range(0, dims[0]):
if ci in ['counts_colpct', 'colpct_counts'] and self.grouping:
if row % 2 == 0:
if self._counts_first():
vc = counts
else:
vc = colpcts
else:
if not self._counts_first():
vc = counts
else:
vc = colpcts
else:
vc = counts if ci == 'counts' else colpcts
metrics.append({col: vc[col] for col in range(0, dims[1])})
return metrics
def _valid_views(self, flat=False):
clean_view_list = []
valid = list(self.views.keys())
org_vc = self._given_views
v_likes = [v for v in valid if v.startswith('__viewlike__')]
if isinstance(org_vc, tuple):
v_likes = tuple(v_likes)
view_coll = org_vc + v_likes
for v in view_coll:
if isinstance(v, str):
if v in valid:
clean_view_list.append(v)
else:
new_v = []
for sub_v in v:
if sub_v in valid:
new_v.append(sub_v)
if isinstance(v, tuple):
new_v = list(new_v)
if new_v:
if len(new_v) == 1: new_v = new_v[0]
if not flat:
clean_view_list.append(new_v)
else:
if isinstance(new_v, list):
clean_view_list.extend(new_v)
else:
clean_view_list.append(new_v)
return clean_view_list
def _add_contents(self, viewelement):
"""
"""
if viewelement.startswith('__viewlike__'):
parts = '|||||'
viewlike = True
else:
parts = viewelement.split('|')
viewlike = False
return dict(is_default=self._is_default(parts),
is_c_base=self._is_c_base(parts),
is_r_base=self._is_r_base(parts),
is_e_base=self._is_e_base(parts),
is_c_base_gross=self._is_c_base_gross(parts),
is_counts=self._is_counts(parts),
is_c_pct=self._is_c_pct(parts),
is_r_pct=self._is_r_pct(parts),
is_res_c_pct=self._is_res_c_pct(parts),
is_counts_sum=self._is_counts_sum(parts),
is_c_pct_sum=self._is_c_pct_sum(parts),
is_counts_cumsum=self._is_counts_cumsum(parts),
is_c_pct_cumsum=self._is_c_pct_cumsum(parts),
is_net=self._is_net(parts),
is_block=self._is_block(parts),
is_calc_only = self._is_calc_only(parts),
is_mean=self._is_mean(parts),
is_stddev=self._is_stddev(parts),
is_min=self._is_min(parts),
is_max=self._is_max(parts),
is_median=self._is_median(parts),
is_variance=self._is_variance(parts),
is_sem=self._is_sem(parts),
is_varcoeff=self._is_varcoeff(parts),
is_percentile=self._is_percentile(parts),
is_propstest=self._is_propstest(parts),
is_meanstest=self._is_meanstest(parts),
is_weighted=self._is_weighted(parts),
weight=self._weight(parts),
is_stat=self._is_stat(parts),
stat=self._stat(parts),
siglevel=self._siglevel(parts),
is_viewlike=viewlike)
def _row_pattern(self, target_ci):
"""
"""
cisplit = self.cell_items.split('_')
if target_ci == 'c%':
start = cisplit.index('colpct')
elif target_ci == 'counts':
start = cisplit.index('counts')
repeat = self.ci_count
return (start, repeat)
def _view_idxs(self, view_tags, keep_tests=True, keep_bases=True, names=False, ci=None):
"""
"""
if not isinstance(view_tags, list): view_tags = [view_tags]
rowmeta = self.named_rowmeta
nested = self.array_style == 0
if nested:
if self.ci_count > 1:
rp_idx = self._row_pattern(ci)[0]
rowmeta = rowmeta[rp_idx]
else:
rp_idx = 0
rowmeta = rowmeta[0]
rows = []
for r in rowmeta:
is_code = str(r[0]).isdigit()
if 'is_counts' in r[1] and is_code:
rows.append(('counts', r[1]))
elif 'is_c_pct' in r[1] and is_code:
rows.append(('c%', r[1]))
elif 'is_propstest' in r[1]:
rows.append((r[0], r[1]))
elif 'is_meanstest' in r[1]:
rows.append((r[0], r[1]))
else:
rows.append(r)
invalids = []
if not keep_tests:
invalids.extend(['is_propstest', 'is_meanstest'])
if ci == 'counts':
invalids.append('is_c_pct')
elif ci == 'c%':
invalids.append('is_counts')
idxs = []
view_names = []
order = []
for i, row in enumerate(rows):
if any([invalid in row[1] for invalid in invalids]):
if not (row[0] == 'All' and keep_bases): continue
if row[0] in view_tags:
order.append(view_tags.index(row[0]))
idxs.append(i)
if nested:
view_names.append(self._views_per_rows()[rp_idx][i])
else:
view_names.append(self._views_per_rows()[i])
return (idxs, view_names, order) if names else (idxs, order)
@staticmethod
def _remove_grouped_blanks(viewindex_labs):
"""
"""
full = []
for v in viewindex_labs:
if v == '':
full.append(last)
else:
last = v
full.append(last)
return full
def _slice_edited_index(self, axis, positions):
"""
"""
l_zero = axis.get_level_values(0).values.tolist()[0]
l_one = axis.get_level_values(1).values.tolist()
l_one = [l_one[p] for p in positions]
axis_tuples = [(l_zero, lab) for lab in l_one]
if self.array_style == 0:
names = ['Array', 'Questions']
else:
names = ['Question', 'Values']
return pd.MultiIndex.from_tuples(axis_tuples, names=names)
def _non_grouped_axis(self):
"""
"""
axis = self._frame.index
l_zero = axis.get_level_values(0).values.tolist()[0]
l_one = axis.get_level_values(1).values.tolist()
l_one = self._remove_grouped_blanks(l_one)
axis_tuples = [(l_zero, lab) for lab in l_one]
if self.array_style == 0:
names = ['Array', 'Questions']
else:
names = ['Question', 'Values']
self._frame.index = pd.MultiIndex.from_tuples(axis_tuples, names=names)
return None
@property
def named_rowmeta(self):
if self.painted:
self.toggle_labels()
d = self.describe()
if self.array_style == 0:
n = self._frame.columns.get_level_values(1).values.tolist()
n = self._remove_grouped_blanks(n)
mapped = {rowid: list(zip(n, rowmeta)) for rowid, rowmeta in list(d.items())}
else:
n = self._frame.index.get_level_values(1).values.tolist()
n = self._remove_grouped_blanks(n)
mapped = list(zip(n, d))
if not self.painted: self.toggle_labels()
return mapped
@lazy_property
def _nested_y(self):
return any('>' in v for v in self._y_keys)
def _is_default(self, parts):
return parts[-1] == 'default'
def _is_c_base(self, parts):
return parts[-1] == 'cbase'
def _is_r_base(self, parts):
return parts[-1] == 'rbase'
def _is_e_base(self, parts):
return parts[-1] == 'ebase'
def _is_c_base_gross(self, parts):
return parts[-1] == 'cbase_gross'
def _is_base(self, parts):
return (self._is_c_base(parts) or
self._is_c_base_gross(parts) or
self._is_e_base(parts) or
self._is_r_base(parts))
def _is_counts(self, parts):
return parts[1].startswith('f') and parts[3] == ''
def _is_c_pct(self, parts):
return parts[1].startswith('f') and parts[3] == 'y'
def _is_r_pct(self, parts):
return parts[1].startswith('f') and parts[3] == 'x'
def _is_res_c_pct(self, parts):
return parts[-1] == 'res_c%'
def _is_net(self, parts):
return parts[1].startswith(('f', 'f.c:f', 't.props')) and \
len(parts[2]) > 3 and not parts[2] == 'x++'
def _is_calc_only(self, parts):
if self._is_net(parts) and not self._is_block(parts):
return ((self.__has_freq_calc(parts) or
self.__is_calc_only_propstest(parts)) and not
(self._is_counts_sum(parts) or self._is_c_pct_sum(parts)))
else:
return False
def _is_block(self, parts):
if self._is_net(parts):
conditions = parts[2].split('[')
multiple_conditions = len(conditions) > 2
expand = '+{' in parts[2] or '}+' in parts[2]
complete = '*:' in parts[2]
if expand or complete:
return True
if multiple_conditions:
if self.__has_operator_expr(parts):
return True
return False
return False
return False
def _stat(self, parts):
if parts[1].startswith('d.'):
return parts[1].split('.')[-1]
else:
return None
# non-meta relevant helpers
def __has_operator_expr(self, parts):
e = parts[2]
for syntax in [']*:', '[+{', '}+']:
if syntax in e: e = e.replace(syntax, '')
ops = ['+', '-', '*', '/']
return any(len(e.split(op)) > 1 for op in ops)
def __has_freq_calc(self, parts):
return parts[1].startswith('f.c:f')
def __is_calc_only_propstest(self, parts):
return self._is_propstest(parts) and self.__has_operator_expr(parts)
@staticmethod
def _statname(parts):
split = parts[1].split('.')
if len(split) > 1:
return split[1]
return split[-1]
def _is_mean(self, parts):
return self._statname(parts) == 'mean'
def _is_stddev(self, parts):
return self._statname(parts) == 'stddev'
def _is_min(self, parts):
return self._statname(parts) == 'min'
def _is_max(self, parts):
return self._statname(parts) == 'max'
def _is_median(self, parts):
return self._statname(parts) == 'median'
def _is_variance(self, parts):
return self._statname(parts) == 'var'
def _is_sem(self, parts):
return self._statname(parts) == 'sem'
def _is_varcoeff(self, parts):
return self._statname(parts) == 'varcoeff'
def _is_percentile(self, parts):
return self._statname(parts) in ['upper_q', 'lower_q', 'median']
def _is_counts_sum(self, parts):
return parts[-1].endswith('counts_sum')
def _is_c_pct_sum(self, parts):
return parts[-1].endswith('c%_sum')
def _is_counts_cumsum(self, parts):
return parts[-1].endswith('counts_cumsum')
def _is_c_pct_cumsum(self, parts):
return parts[-1].endswith('c%_cumsum')
def _is_weighted(self, parts):
return parts[4] != ''
def _weight(self, parts):
if parts[4] != '':
return parts[4]
else:
return None
def _is_stat(self, parts):
return parts[1].startswith('d.')
def _is_propstest(self, parts):
return parts[1].startswith('t.props')
def _is_meanstest(self, parts):
return parts[1].startswith('t.means')
def _siglevel(self, parts):
if self._is_meanstest(parts) or self._is_propstest(parts):
return parts[1].split('.')[-1]
else:
return None
def _describe_block(self, description, row_id):
if self.painted:
repaint = True
self.toggle_labels()
else:
repaint = False
vpr = self._views_per_rows()
if row_id is not None:
vpr = [v[1] for v in list(vpr[row_id].items())]
idx = self.dataframe.columns.get_level_values(1).tolist()
else:
idx = self.dataframe.index.get_level_values(1).tolist()
idx_view_map = list(zip(idx, vpr))
block_net_vk = [v for v in vpr if len(v.split('|')[2].split('['))>2 or
'[+{' in v.split('|')[2] or '}+]' in v.split('|')[2]]
has_calc = any([v.split('|')[1].startswith('f.c') for v in block_net_vk])
is_tested = any(v.split('|')[1].startswith('t.props') for v in vpr)
if block_net_vk:
expr = block_net_vk[0].split('|')[2]
expanded_codes = set(map(int, re.findall(r'\d+', expr)))
else:
expanded_codes = []
for idx, m in enumerate(idx_view_map):
if idx_view_map[idx][0] == '':
idx_view_map[idx] = (idx_view_map[idx-1][0], idx_view_map[idx][1])
for idx, row in enumerate(description):
if not 'is_block' in row:
idx_view_map[idx] = None
blocks_len = len(expr.split('],')) * (self.ci_count + is_tested)
if has_calc: blocks_len -= (self.ci_count + is_tested)
block_net_def = []
described_nets = 0
for e in idx_view_map:
if e:
if isinstance(e[0], str):
if has_calc and described_nets == blocks_len:
block_net_def.append('calc')
else:
block_net_def.append('net')
described_nets += 1
else:
code = int(e[0])
if code in expanded_codes:
block_net_def.append('expanded')
else:
block_net_def.append('normal')
else:
block_net_def.append(e)
if repaint: self.toggle_labels()
return block_net_def
def get(self, data_key, filter_key, x_keys, y_keys, views, rules=False,
rules_weight=None, orient='x', prioritize=True):
""" Get the concatenated Chain.DataFrame
"""
self._meta = self.stack[data_key].meta
self._given_views = views
self._x_keys = x_keys
self._y_keys = y_keys
concat_axis = 0
if rules:
if not isinstance(rules, list):
self._has_rules = ['x', 'y']
else:
self._has_rules = rules
# use_views = views[:]
# for first in self.axes[0]:
# for second in self.axes[1]:
# link = self._get_link(data_key, filter_key, first, second)
# for v in use_views:
# if v not in link:
# use_views.remove(v)
for first in self.axes[0]:
found = []
x_frames = []
for second in self.axes[1]:
if self.axis == 1:
link = self._get_link(data_key, filter_key, first, second)
else:
link = self._get_link(data_key, filter_key, second, first)
if link is None:
continue
if prioritize: link = self._drop_substituted_views(link)
found_views, y_frames = self._concat_views(
link, views, rules_weight)
found.append(found_views)
try:
if self._meta['columns'][link.x].get('parent'):
self._is_mask_item = True
except KeyError:
pass
# TODO: contains array summ. attr.
# TODO: make this work y_frames = self._pad_frames(y_frames)
self.array_style = link
if self.array_style > -1:
concat_axis = 1 if self.array_style == 0 else 0
y_frames = self._pad_frames(y_frames)
x_frames.append(pd.concat(y_frames, axis=concat_axis))
self.shapes.append(x_frames[-1].shape)
self._frame = pd.concat(self._pad(x_frames), axis=self.axis)
if self._group_style == 'reduced' and self.array_style > -1:
scan_views = [v if isinstance(v, (tuple, list)) else [v]
for v in self._given_views]
scan_views = [v for v in scan_views if len(v) > 1]
no_tests = []
for scan_view in scan_views:
new_views = []
for view in scan_view:
if not view.split('|')[1].startswith('t.'):
new_views.append(view)
no_tests.append(new_views)
cond = any(len(v) >= 2 for v in no_tests)
if cond:
self._frame = self._reduce_grouped_index(self._frame, 2, self._array_style)
if self.axis == 1:
self.views = found[-1]
else:
self.views = found
self.double_base = len([v for v in self.views
if v.split('|')[-1] == 'cbase']) > 1
self._index = self._frame.index
self._columns = self._frame.columns
self._extract_base_descriptions()
del self.stack
return self
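# Example (illustrative sketch; 'mydata', 'no_filter', 'q1' and 'gender' are assumed
# Stack keys/variables and the listed view keys must already be aggregated):
#   chain = chain.get(data_key='mydata', filter_key='no_filter',
#                     x_keys=['q1'], y_keys=['@', 'gender'],
#                     views=['x|f|x:|||cbase', 'x|f|:|||counts'], orient='x')
#   chain.dataframe  # the concatenated results across all y-keys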
def _toggle_bases(self, keep_weighted=True):
df = self._frame
is_array = self._array_style == 0
contents = self.contents[0] if is_array else self.contents
has_wgt_b = [k for k, v in list(contents.items())
if v['is_c_base'] and v['is_weighted']]
has_unwgt_b = [k for k, v in list(contents.items())
if v['is_c_base'] and not v['is_weighted']]
if not (has_wgt_b and has_unwgt_b):
return None
if keep_weighted:
drop_rows = has_unwgt_b
names = ['x|f|x:|||cbase']
else:
drop_rows = has_wgt_b
names = ['x|f|x:||{}|cbase'.format(list(contents.values())[0]['weight'])]
for v in self.views.copy():
if v in names:
del self._views[v]
df = self._frame
if is_array:
cols = [col for x, col in enumerate(df.columns.tolist())
if not x in drop_rows]
df = df.loc[:, cols]
else:
rows = [row for x, row in enumerate(df.index.tolist())
if not x in drop_rows]
df = df.loc[rows, :]
self._frame = df
self._index = df.index
self._columns = df.columns
return None
def _drop_substituted_views(self, link):
if any(isinstance(sect, (list, tuple)) for sect in self._given_views):
chain_views = list(chain.from_iterable(self._given_views))
else:
chain_views = self._given_views
has_compl = any(']*:' in vk for vk in link)
req_compl = any(']*:' in vk for vk in chain_views)
has_cumsum = any('++' in vk for vk in link)
req_cumsum = any('++' in vk for vk in chain_views)
if (has_compl and req_compl) or (has_cumsum and req_cumsum):
new_link = copy.copy(link)
views = []
for vk in link:
vksplit = vk.split('|')
method, cond, name = vksplit[1], vksplit[2], vksplit[-1]
full_frame = name in ['counts', 'c%']
basic_sigtest = method.startswith('t.') and cond == ':'
if not full_frame and not basic_sigtest: views.append(vk)
for vk in link:
if vk not in views: del new_link[vk]
return new_link
else:
return link
def _pad_frames(self, frames):
""" TODO: doc string
"""
empty_frame = lambda f: pd.DataFrame(index=f.index, columns=f.columns)
max_lab = max(f.axes[self.array_style].size for f in frames)
for e, f in enumerate(frames):
size = f.axes[self.array_style].size
if size < max_lab:
f = pd.concat([f, empty_frame(f)], axis=self.array_style)
order = [None] * (size * 2)
order[::2] = list(range(size))
order[1::2] = list(range(size, size * 2))
if self.array_style == 0:
frames[e] = f.iloc[order, :]
else:
frames[e] = f.iloc[:, order]
return frames
def _get_link(self, data_key, filter_key, x_key, y_key):
"""
"""
base = self.stack[data_key][filter_key]
if x_key in base:
base = base[x_key]
if y_key in base:
return base[y_key]
else:
if self._array_style == -1:
self._y_keys.remove(y_key)
else:
self._x_keys.remove(x_key)
return None
def _index_switch(self, axis):
""" Returns self.dataframe/frame index/ columns based on given x/ y
"""
return dict(x=self._frame.index, y=self._frame.columns).get(axis)
def _pad(self, frames):
""" Pad index/ columns when nlevels is less than the max nlevels
in list of dataframes.
"""
indexes = []
max_nlevels = [max(f.axes[i].nlevels for f in frames) for i in (0, 1)]
for e, f in enumerate(frames):
indexes = []
for i in (0, 1):
if f.axes[i].nlevels < max_nlevels[i]:
indexes.append(self._pad_index(f.axes[i], max_nlevels[i]))
else:
indexes.append(f.axes[i])
frames[e].index, frames[e].columns = indexes
return frames
def _pad_index(self, index, size):
""" Add levels to columns MultiIndex so the nlevels matches
the biggest columns MultiIndex in DataFrames to be concatenated.
"""
pid = self.pad_id
pad = ((size - index.nlevels) // 2)
fill = int((pad % 2) == 1)
names = list(index.names)
names[0:0] = names[:2] * pad
arrays = self._lzip(index.values)
arrays[0:0] = [tuple('#pad-%s' % pid for _ in arrays[i])
for i in range(pad + fill)] * pad
return pd.MultiIndex.from_arrays(arrays, names=names)
@staticmethod
def _reindx_source(df, varname, total):
"""
"""
df.index = df.index.set_levels([varname], level=0, inplace=False)
if df.columns.get_level_values(0).tolist()[0] != varname and total:
df.columns = df.columns.set_levels([varname], level=0, inplace=False)
return df
def _concat_views(self, link, views, rules_weight, found=None):
""" Concatenates the Views of a Chain.
"""
frames = []
totals = [[_TOTAL]] * 2
if found is None:
found = OrderedDict()
if self._text_map is None:
self._text_map = dict()
for view in views:
try:
self.array_style = link
if isinstance(view, (list, tuple)):
if not self.grouping:
self.grouping = True
if isinstance(view, tuple):
self._group_style = 'reduced'
else:
self._group_style = 'normal'
if self.array_style > -1:
use_grp_type = 'normal'
else:
use_grp_type = self._group_style
found, grouped = self._concat_views(link, view, rules_weight, found=found)
if grouped:
frames.append(self._group_views(grouped, use_grp_type))
else:
agg = link[view].meta()['agg']
is_descriptive = agg['method'] == 'descriptives'
is_base = agg['name'] in ['cbase', 'rbase', 'ebase', 'cbase_gross']
is_sum = agg['name'] in ['counts_sum', 'c%_sum']
is_net = link[view].is_net()
oth_src = link[view].has_other_source()
no_total_sign = is_descriptive or is_base or is_sum or is_net
if link[view]._custom_txt and is_descriptive:
statname = agg['fullname'].split('|')[1].split('.')[1]
if not statname in self._custom_texts:
self._custom_texts[statname] = []
self._custom_texts[statname].append(link[view]._custom_txt)
if is_descriptive:
text = agg['name']
try:
self._text_map.update({agg['name']: text})
except AttributeError:
self._text_map = {agg['name']: text}
if agg['text']:
name = dict(cbase='All').get(agg['name'], agg['name'])
try:
self._text_map.update({name: agg['text']})
except AttributeError:
self._text_map = {name: agg['text'],
_TOTAL: 'Total'}
if agg['grp_text_map']:
# try:
if not agg['grp_text_map'] in self._grp_text_map:
self._grp_text_map.append(agg['grp_text_map'])
# except AttributeError:
# self._grp_text_map = [agg['grp_text_map']]
frame = link[view].dataframe
if oth_src:
frame = self._reindx_source(frame, link.x, link.y == _TOTAL)
# RULES SECTION
# ========================================================
# TODO: DYNAMIC RULES:
# - all_rules_axes, rules_weight must be provided not hardcoded
# - Review copy/pickle in original version!!!
rules_weight = None
if self._has_rules:
rules = Rules(link, view, self._has_rules, rules_weight)
# print rules.show_rules()
# rules.get_slicer()
# print rules.show_slicers()
rules.apply()
frame = rules.rules_df()
# ========================================================
if not no_total_sign and (link.x == _TOTAL or link.y == _TOTAL):
if link.x == _TOTAL:
level_names = [[link.y], ['@']]
elif link.y == _TOTAL:
level_names = [[link.x], ['@']]
try:
frame.columns.set_levels(level_names, level=[0, 1],
inplace=True)
except ValueError:
pass
frames.append(frame)
if view not in found:
if self._array_style != 0:
found[view] = len(frame.index)
else:
found[view] = len(frame.columns)
if link[view]._kwargs.get('flag_bases'):
flag_bases = link[view]._kwargs['flag_bases']
try:
if flag_bases not in self._flag_bases:
self._flag_bases.append(flag_bases)
except TypeError:
self._flag_bases = [flag_bases]
except KeyError:
pass
return found, frames
@staticmethod
def _temp_nest_index(df):
"""
Flatten the nested MultiIndex for easier handling.
"""
# Build flat column labels
flat_cols = []
order_idx = []
i = -1
for col in df.columns.values:
flat_col_lab = ''.join(str(col[:-1])).strip()
if not flat_col_lab in flat_cols:
i += 1
order_idx.append(i)
flat_cols.append(flat_col_lab)
else:
order_idx.append(i)
# Drop unwanted levels (keep last Values Index-level in that process)
levels = list(range(0, df.columns.nlevels-1))
drop_levels = levels[:-2]+ [levels[-1]]
df.columns = df.columns.droplevel(drop_levels)
# Apply the new flat labels and resort the columns
df.columns.set_levels(levels=flat_cols, level=0, inplace=True)
df.columns.set_codes(order_idx, level=0, inplace=True)
return df, flat_cols
@staticmethod
def _replace_test_results(df, replacement_map, char_repr):
"""
Swap all digit-based results with letters referencing the column header.
.. note:: The modified df will be stripped of all indexing on both rows
and columns.
"""
all_dfs = []
ignore = False
for col in list(replacement_map.keys()):
target_col = df.columns[0] if col == '@' else col
value_df = df[[target_col]].copy()
if not col == '@':
value_df.drop('@', axis=1, level=1, inplace=True)
values = value_df.replace(np.NaN, '-').values.tolist()
r = replacement_map[col]
new_values = []
case = None
for v in values:
if isinstance(v[0], str):
if char_repr == 'upper':
case = 'up'
elif char_repr == 'lower':
case = 'low'
elif char_repr == 'alternate':
if case == 'up':
case = 'low'
else:
case = 'up'
for no, l in sorted(list(r.items()), reverse=True):
v = [char.replace(str(no), l if case == 'up' else l.lower())
if isinstance(char, str)
else char for char in v]
new_values.append(v)
else:
new_values.append(v)
part_df = pd.DataFrame(new_values)
all_dfs.append(part_df)
letter_df = pd.concat(all_dfs, axis=1)
# Clean it up
letter_df.replace('-', np.NaN, inplace=True)
for signs in [('[', ''), (']', ''), (', ', '.')]:
letter_df = letter_df.applymap(lambda x: x.replace(signs[0], signs[1])
if isinstance(x, str) else x)
return letter_df
@staticmethod
def _get_abc_letters(no_of_cols, incl_total):
"""
Get the list of letter replacements depending on the y-axis length.
"""
repeat_alphabet = int(no_of_cols / 26)
abc = list(string.ascii_uppercase)
letters = list(string.ascii_uppercase)
if repeat_alphabet:
for r in range(0, repeat_alphabet):
letter = abc[r]
extend_abc = ['{}{}'.format(letter, l) for l in abc]
letters.extend(extend_abc)
if incl_total:
letters = ['@'] + letters[:no_of_cols-1]
else:
letters = letters[:no_of_cols]
return letters
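# Worked example (follows directly from the logic above):
#   Chain._get_abc_letters(5, incl_total=True)    # -> ['@', 'A', 'B', 'C', 'D']
#   Chain._get_abc_letters(30, incl_total=False)  # -> ['A', ..., 'Z', 'AA', 'AB', 'AC', 'AD']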
def _any_tests(self):
vms = [v.split('|')[1] for v in list(self._views.keys())]
return any('t.' in v for v in vms)
def _no_of_tests(self):
tests = [v for v in list(self._views.keys())
if v.split('|')[1].startswith('t.')]
levels = [v.split('|')[1].split('.')[-1] for v in tests]
return len(set(levels))
def _siglevel_on_row(self):
"""
"""
vpr = self._views_per_rows()
tests = [(no, v) for no, v in enumerate(vpr)
if v.split('|')[1].startswith('t.')]
s = [(t[0],
float(int(t[1].split('|')[1].split('.')[3].split('+')[0]))/100.0)
for t in tests]
return s
def transform_tests(self, char_repr='upper', display_level=True):
"""
Transform column-wise digit-based test representation to letters.
Adds a new header row assigning letters to all columns (A, B, C, ...)
and maps any significance test's result cells to these column
indicators.
"""
if not self._any_tests(): return None
# Preparation of input dataframe and dimensions of y-axis header
df = self.dataframe.copy()
number_codes = df.columns.get_level_values(-1).tolist()
number_header_row = copy.copy(df.columns)
if self._no_of_tests() != 2 and char_repr == 'alternate':
char_repr = 'upper'
has_total = '@' in self._y_keys
if self._nested_y:
df, questions = self._temp_nest_index(df)
else:
questions = self._y_keys
all_num = number_codes if not has_total else [0] + number_codes[1:]
# Set the new column header (ABC, ...)
column_letters = self._get_abc_letters(len(number_codes), has_total)
vals = df.columns.get_level_values(0).tolist()
mi = pd.MultiIndex.from_arrays(
(vals,
column_letters))
df.columns = mi
self.sig_test_letters = df.columns.get_level_values(1).tolist()
# Build the replacements dict and build list of unique column indices
test_dict = OrderedDict()
for num_idx, col in enumerate(df.columns):
if col[1] == '@':
question = col[1]
else:
question = col[0]
if not question in test_dict: test_dict[question] = {}
number = all_num[num_idx]
letter = col[1]
test_dict[question][number] = letter
letter_df = self._replace_test_results(df, test_dict, char_repr)
# Re-apply indexing & finalize the new crossbreak column header
if display_level:
levels = self._siglevel_on_row()
index = df.index.get_level_values(1).tolist()
for i, l in levels:
index[i] = '#Level: {}'.format(l)
l0 = df.index.get_level_values(0).tolist()[0]
tuples = [(l0, i) for i in index]
index = pd.MultiIndex.from_tuples(
tuples, names=['Question', 'Values'])
letter_df.index = index
else:
letter_df.index = df.index
letter_df.columns = number_header_row
letter_df = self._apply_letter_header(letter_df)
self._frame = letter_df
return self
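# Example (illustrative sketch, assuming the Chain carries column proportion/means tests):
#   chain.transform_tests(char_repr='upper', display_level=True)
#   # A digit result such as '[2, 3]' is rendered as the letters of the columns
#   # holding codes 2 and 3 (e.g. 'B.C'), and test rows get a '#Level: 0.05'-style label.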
def _remove_letter_header(self):
self._frame.columns = self._frame.columns.droplevel(level=-1)
return None
def _apply_letter_header(self, df):
"""
"""
new_tuples = []
org_names = [n for n in df.columns.names]
idx = df.columns
for i, l in zip(idx, self.sig_test_letters):
new_tuples.append(i + (l, ))
if not 'Test-IDs' in org_names:
org_names.append('Test-IDs')
mi = pd.MultiIndex.from_tuples(new_tuples, names=org_names)
df.columns = mi
return df
def _extract_base_descriptions(self):
"""
"""
if self.source == 'Crunch multitable':
self.base_descriptions = self._meta['var_meta'].get('notes', None)
else:
base_texts = OrderedDict()
arr_style = self.array_style
if arr_style != -1:
var = self._x_keys[0] if arr_style == 0 else self._y_keys[0]
masks = self._meta['masks']
columns = self._meta['columns']
item = masks[var]['items'][0]['source'].split('@')[-1]
test_item = columns[item]
test_mask = masks[var]
if 'properties' in test_mask:
base_text = test_mask['properties'].get('base_text', None)
elif 'properties' in test_item:
base_text = test_item['properties'].get('base_text', None)
else:
base_text = None
self.base_descriptions = base_text
else:
for x in self._x_keys:
if 'properties' in self._meta['columns'][x]:
bt = self._meta['columns'][x]['properties'].get('base_text', None)
if bt:
base_texts[x] = bt
if base_texts:
if self.orientation == 'x':
self.base_descriptions = list(base_texts.values())[0]
else:
self.base_descriptions = list(base_texts.values())
return None
def _ensure_indexes(self):
if self.painted:
self._frame.index, self._frame.columns = self.index, self.columns
if self.structure is not None:
self._frame.loc[:, :] = self.frame_values
else:
self.index, self.columns = self._frame.index, self._frame.columns
if self.structure is not None:
self.frame_values = self._frame.values
def _finish_text_key(self, text_key, text_loc_x, text_loc_y):
text_keys = dict()
text_key = text_key or self._default_text
if text_loc_x:
text_keys['x'] = (text_loc_x, text_key)
else:
text_keys['x'] = text_key
if text_loc_y:
text_keys['y'] = (text_loc_y, text_key)
else:
text_keys['y'] = text_key
return text_keys
def paint(self, text_key=None, text_loc_x=None, text_loc_y=None, display=None,
axes=None, view_level=False, transform_tests='upper', display_level=True,
add_test_ids=True, add_base_texts='simple', totalize=False,
sep=None, na_rep=None, transform_column_names=None,
exclude_mask_text=False):
"""
Apply labels, sig. testing conversion and other post-processing to the
``Chain.dataframe`` property.
Use this to prepare a ``Chain`` for further usage in an Excel or
PowerPoint Build.
Parameters
----------
text_key : str, default None
The text key used to select meta labels; falls back to the Chain's
default text key if not provided.
text_loc_x : str, default None
The key in the 'text' to locate the text_key for the x-axis
text_loc_y : str, default None
The key in the 'text' to locate the text_key for the y-axis
display : {'x', 'y', ['x', 'y']}, default None
The axes for which the variable name is prepended to the painted
label.
axes : {'x', 'y', ['x', 'y']}, default None
The axes to paint; both axes are painted if not provided.
view_level : bool, default False
If True, a third Index level holding the view keys is added to the
rows.
transform_tests : {False, 'upper', 'lower', 'alternate'}, default 'upper'
Controls how digit-based significance test results are converted
into column letters; False skips the conversion.
display_level : bool, default True
If True, the tested significance levels are shown in the row labels
of the test views.
add_test_ids : bool, default True
If True, the column test letters (A, B, C, ...) are re-added as an
extra column header level after painting.
add_base_texts : {False, 'all', 'simple', 'simple-no-items'}, default 'simple'
Whether or not to include existing ``.base_descriptions`` str
to the label of the appropriate base view. Selecting ``'simple'``
will inject the base texts to non-array type Chains only.
totalize : bool, default False
If True, the '@' total column is labelled 'Total' instead of
carrying the x-variable's label.
sep : str, default None
The separator used for painting ``pandas.DataFrame`` columns
na_rep : str, default None
numpy.NaN will be replaced with na_rep if passed
transform_column_names : dict, default None
Transformed column_names are added to the labeltexts.
exclude_mask_text : bool, default False
Exclude mask text from mask-item texts.
Returns
-------
None
The ``.dataframe`` is modified inplace.
"""
self._ensure_indexes()
text_keys = self._finish_text_key(text_key, text_loc_x, text_loc_y)
if self.structure is not None:
self._paint_structure(text_key, sep=sep, na_rep=na_rep)
else:
self.totalize = totalize
if transform_tests: self.transform_tests(transform_tests, display_level)
# Remove any letter header row from transformed tests...
if self.sig_test_letters:
self._remove_letter_header()
if display is None:
display = _AXES
if axes is None:
axes = _AXES
self._paint(text_keys, display, axes, add_base_texts,
transform_column_names, exclude_mask_text)
# Re-build the full column index (labels + letter row)
if self.sig_test_letters and add_test_ids:
self._frame = self._apply_letter_header(self._frame)
if view_level:
self._add_view_level()
self.painted = True
return None
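# Example (illustrative sketch):
#   chain.paint(text_key='en-GB', transform_tests='upper',
#               add_base_texts='simple', totalize=True)
#   chain.dataframe       # now carries question/category labels (+ test letters)
#   chain.toggle_labels() # flips back to the unpainted index/columns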
def _paint_structure(self, text_key=None, sep=None, na_rep=None):
""" Paint the dataframe-type Chain.
"""
if not text_key:
text_key = self._meta['lib']['default text']
str_format = '%%s%s%%s' % sep
column_mapper = dict()
na_rep = na_rep or ''
pattern = r'\, (?=\W|$)'
for column in self.structure.columns:
if not column in self._meta['columns']: continue
meta = self._meta['columns'][column]
if sep:
column_mapper[column] = str_format % (column, meta['text'][text_key])
else:
column_mapper[column] = meta['text'][text_key]
if meta.get('values'):
values = meta['values']
if isinstance(values, str):
pointers = values.split('@')
values = self._meta[pointers.pop(0)]
while pointers:
values = values[pointers.pop(0)]
if meta['type'] == 'delimited set':
value_mapper = {
str(item['value']): item['text'][text_key]
for item in values
}
series = self.structure[column]
try:
series = (series.str.split(';')
.apply(pd.Series, 1)
.stack(dropna=False)
.map(value_mapper.get) #, na_action='ignore')
.unstack())
first = series[series.columns[0]]
rest = [series[c] for c in series.columns[1:]]
self.structure[column] = (
first
.str.cat(rest, sep=', ', na_rep='')
.str.slice(0, -2)
.replace(to_replace=pattern, value='', regex=True)
.replace(to_replace='', value=na_rep)
)
except AttributeError:
continue
else:
value_mapper = {
item['value']: item['text'][text_key]
for item in values
}
self.structure[column] = (self.structure[column]
.map(value_mapper.get,
na_action='ignore')
)
self.structure[column].fillna(na_rep, inplace=True)
self.structure.rename(columns=column_mapper, inplace=True)
def _paint(self, text_keys, display, axes, bases, transform_column_names,
exclude_mask_text):
""" Paint the Chain.dataframe
"""
indexes = []
for axis in _AXES:
index = self._index_switch(axis)
if axis in axes:
index = self._paint_index(index, text_keys, display, axis,
bases, transform_column_names,
exclude_mask_text)
indexes.append(index)
self._frame.index, self._frame.columns = indexes
def _paint_index(self, index, text_keys, display, axis, bases,
transform_column_names, exclude_mask_text):
""" Paint the Chain.dataframe.index1 """
error = "No text keys from {} found in {}"
level_0_text, level_1_text = [], []
nlevels = index.nlevels
if nlevels > 2:
arrays = []
for i in range(0, nlevels, 2):
index_0 = index.get_level_values(i)
index_1 = index.get_level_values(i+1)
tuples = list(zip(index_0.values, index_1.values))
names = (index_0.name, index_1.name)
sub = pd.MultiIndex.from_tuples(tuples, names=names)
sub = self._paint_index(sub, text_keys, display, axis, bases,
transform_column_names, exclude_mask_text)
arrays.extend(self._lzip(sub.ravel()))
tuples = self._lzip(arrays)
return pd.MultiIndex.from_tuples(tuples, names=index.names)
levels = self._lzip(index.values)
arrays = (self._get_level_0(levels[0], text_keys, display, axis,
transform_column_names, exclude_mask_text),
self._get_level_1(levels, text_keys, display, axis, bases))
new_index = pd.MultiIndex.from_arrays(arrays, names=index.names)
return new_index
def _get_level_0(self, level, text_keys, display, axis,
transform_column_names, exclude_mask_text):
"""
"""
level_0_text = []
for value in level:
if str(value).startswith('#pad'):
pass
elif pd.notnull(value):
if value in list(self._text_map.keys()):
value = self._text_map[value]
else:
text = self._get_text(value, text_keys[axis], exclude_mask_text)
if axis in display:
if transform_column_names:
value = transform_column_names.get(value, value)
value = '{}. {}'.format(value, text)
else:
value = text
level_0_text.append(value)
if '@' in self._y_keys and self.totalize and axis == 'y':
level_0_text = ['Total'] + level_0_text[1:]
return list(map(str, level_0_text))
def _get_level_1(self, levels, text_keys, display, axis, bases):
"""
"""
level_1_text = []
if text_keys[axis] in self._transl:
tk_transl = text_keys[axis]
else:
tk_transl = self._default_text
c_text = copy.deepcopy(self._custom_texts) if self._custom_texts else {}
for i, value in enumerate(levels[1]):
if str(value).startswith('#pad'):
level_1_text.append(value)
elif pd.isnull(value):
level_1_text.append(value)
elif str(value) == '':
level_1_text.append(value)
elif str(value).startswith('#Level: '):
level_1_text.append(value.replace('#Level: ', ''))
else:
translate = list(self._transl[list(self._transl.keys())[0]].keys())
if value in list(self._text_map.keys()) and value not in translate:
level_1_text.append(self._text_map[value])
elif value in translate:
if value == 'All':
text = self._specify_base(i, text_keys[axis], bases)
else:
text = self._transl[tk_transl][value]
if value in c_text:
add_text = c_text[value].pop(0)
text = '{} {}'.format(text, add_text)
level_1_text.append(text)
elif value == 'All (eff.)':
text = self._specify_base(i, text_keys[axis], bases)
level_1_text.append(text)
else:
if any(self.array_style == a and axis == x for a, x in ((0, 'x'), (1, 'y'))):
text = self._get_text(value, text_keys[axis], True)
level_1_text.append(text)
else:
try:
values = self._get_values(levels[0][i])
if not values:
level_1_text.append(value)
else:
for item in self._get_values(levels[0][i]):
if int(value) == item['value']:
text = self._get_text(item, text_keys[axis])
level_1_text.append(text)
except (ValueError, UnboundLocalError):
if self._grp_text_map:
for gtm in self._grp_text_map:
if value in list(gtm.keys()):
text = self._get_text(gtm[value], text_keys[axis])
level_1_text.append(text)
return list(map(str, level_1_text))
@staticmethod
def _unwgt_label(views, base_vk):
valid = ['cbase', 'cbase_gross', 'rbase', 'ebase']
basetype = base_vk.split('|')[-1]
views_split = [v.split('|') for v in views]
multibase = len([v for v in views_split if v[-1] == basetype]) > 1
weighted = base_vk.split('|')[-2]
w_diff = len([v for v in views_split
if not v[-1] in valid and not v[-2] == weighted]) > 0
if weighted:
return False
elif multibase or w_diff:
return True
else:
return False
def _add_base_text(self, base_val, tk, bases):
if self._array_style == 0 and bases != 'all':
return base_val
else:
bt = self.base_descriptions
if isinstance(bt, dict):
bt_by_key = bt[tk]
else:
bt_by_key = bt
if bt_by_key:
if bt_by_key.startswith('%s:' % base_val):
bt_by_key = bt_by_key.replace('%s:' % base_val, '')
return '{}: {}'.format(base_val, bt_by_key)
else:
return base_val
def _specify_base(self, view_idx, tk, bases):
tk_transl = tk if tk in self._transl else self._default_text
base_vk = self._valid_views()[view_idx]
basetype = base_vk.split('|')[-1]
unwgt_label = self._unwgt_label(list(self._views.keys()), base_vk)
if unwgt_label:
if basetype == 'cbase_gross':
base_value = self._transl[tk_transl]['no_w_gross_All']
elif basetype == 'ebase':
base_value = 'Unweighted effective base'
else:
base_value = self._transl[tk_transl]['no_w_All']
else:
if basetype == 'cbase_gross':
base_value = self._transl[tk_transl]['gross All']
elif basetype == 'ebase':
base_value = 'Effective base'
elif not bases or (bases == 'simple-no-items' and self._is_mask_item):
base_value = self._transl[tk_transl]['All']
else:
key = tk
if isinstance(tk, tuple):
_, key = tk
base_value = self._add_base_text(self._transl[tk_transl]['All'],
key, bases)
return base_value
def _get_text(self, value, text_key, item_text=False):
"""
"""
if value in list(self._meta['columns'].keys()):
col = self._meta['columns'][value]
if item_text and col.get('parent'):
parent = list(col['parent'].keys())[0].split('@')[-1]
items = self._meta['masks'][parent]['items']
for i in items:
if i['source'].split('@')[-1] == value:
obj = i['text']
break
else:
obj = col['text']
elif value in list(self._meta['masks'].keys()):
obj = self._meta['masks'][value]['text']
elif 'text' in value:
obj = value['text']
else:
obj = value
return self._get_text_from_key(obj, text_key)
def _get_text_from_key(self, text, text_key):
""" Find the first value in a meta object's "text" key that matches a
text_key for its axis.
"""
if isinstance(text_key, tuple):
loc, key = text_key
if loc in text:
if key in text[loc]:
return text[loc][key]
elif self._default_text in text[loc]:
return text[loc][self._default_text]
if key in text:
return text[key]
for key in (text_key, self._default_text):
if key in text:
return text[key]
return '<label>'
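# Resolution order, illustrated for text_key=('x edits', 'en-GB') (assumed values): the
# lookup tries text['x edits']['en-GB'], then text['x edits'][<default text>], then
# text['en-GB'], then the default text key, and finally the '<label>' placeholder.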
def _get_values(self, column):
""" Returns values from self._meta["columns"] or
self._meta["lib"]["values"][<mask name>] if parent is "array"
"""
if column in self._meta['columns']:
values = self._meta['columns'][column].get('values', [])
elif column in self._meta['masks']:
values = self._meta['lib']['values'].get(column, [])
if isinstance(values, str):
keys = values.split('@')
values = self._meta[keys.pop(0)]
while keys:
values = values[keys.pop(0)]
return values
def _add_view_level(self, shorten=False):
""" Insert a third Index level containing View keys into the DataFrame.
"""
vnames = self._views_per_rows()
if shorten:
vnames = [v.split('|')[-1] for v in vnames]
self._frame['View'] = pd.Series(vnames, index=self._frame.index)
self._frame.set_index('View', append=True, inplace=True)
def toggle_labels(self):
""" Restore the unpainted/ painted Index, Columns appearance.
"""
if self.painted:
self.painted = False
else:
self.painted = True
attrs = ['index', 'columns']
if self.structure is not None:
attrs.append('_frame_values')
for attr in attrs:
vals = attr[6:] if attr.startswith('_frame') else attr
frame_val = getattr(self._frame, vals)
setattr(self._frame, attr, getattr(self, attr))
setattr(self, attr, frame_val)
if self.structure is not None:
values = self._frame.values
self._frame.loc[:, :] = self.frame_values
self.frame_values = values
return self
@staticmethod
def _single_column(*levels):
""" Returns True if multiindex level 0 has one unique value
"""
return all(len(level) == 1 for level in levels)
def _group_views(self, frame, group_type):
""" Re-sort rows so that they appear as being grouped inside the
Chain.dataframe.
"""
grouped_frame = []
len_of_frame = len(frame)
frame = pd.concat(frame, axis=0)
index_order = frame.index.get_level_values(1).tolist()
index_order = index_order[:int(len(index_order) / len_of_frame)]
gb_df = frame.groupby(level=1, sort=False)
for i in index_order:
grouped_df = gb_df.get_group(i)
if group_type == 'reduced':
grouped_df = self._reduce_grouped_index(grouped_df, len_of_frame-1)
grouped_frame.append(grouped_df)
grouped_frame = pd.concat(grouped_frame, verify_integrity=False)
return grouped_frame
@staticmethod
def _reduce_grouped_index(grouped_df, view_padding, array_summary=-1):
idx = grouped_df.index
q = idx.get_level_values(0).tolist()[0]
if array_summary == 0:
val = idx.get_level_values(1).tolist()
for index in range(1, len(val), 2):
val[index] = ''
grp_vals = val
elif array_summary == 1:
grp_vals = []
indexed = []
val = idx.get_level_values(1).tolist()
for v in val:
if not v in indexed or v == 'All':
grp_vals.append(v)
indexed.append(v)
else:
grp_vals.append('')
else:
val = idx.get_level_values(1).tolist()[0]
grp_vals = [val] + [''] * view_padding
mi = pd.MultiIndex.from_product([[q], grp_vals], names=idx.names)
grouped_df.index = mi
return grouped_df
@staticmethod
def _lzip(arr):
"""
"""
return list(zip(*arr))
@staticmethod
def _force_list(obj):
if isinstance(obj, (list, tuple)):
return obj
return [obj]
@classmethod
def __pad_id(cls):
cls._pad_id += 1
return cls._pad_id
# class MTDChain(Chain):
# def __init__(self, mtd_doc, name=None):
# super(MTDChain, self).__init__(stack=None, name=name, structure=None)
# self.mtd_doc = mtd_doc
# self.source = 'Dimensions MTD'
# self.get = self._get
# def _get(self, ignore=None, labels=True):
# per_folder = OrderedDict()
# failed = []
# unsupported = []
# for name, tab_def in self.mtd_doc.items():
# try:
# if isinstance(tab_def.values()[0], dict):
# unsupported.append(name)
# else:
# tabs = split_tab(tab_def)
# chain_dfs = []
# for tab in tabs:
# df, meta = tab[0], tab[1]
# # SOME DFs HAVE TOO MANY / UNUSED LEVELS...
# if len(df.columns.levels) > 2:
# df.columns = df.columns.droplevel(0)
# x, y = _get_axis_vars(df)
# df.replace('-', np.NaN, inplace=True)
# relabel_axes(df, meta, labels=labels)
# df = df.drop('Base', axis=1, level=1)
# try:
# df = df.applymap(lambda x: float(x.replace(',', '.')
# if isinstance(x, (str, unicode)) else x))
# except:
# msg = "Could not convert df values to float for table '{}'!"
# # warnings.warn(msg.format(name))
# chain_dfs.append(to_chain((df, x, y), meta))
# per_folder[name] = chain_dfs
# except:
# failed.append(name)
# print 'Conversion failed for:\n{}\n'.format(failed)
# print 'Subfolder conversion unsupported for:\n{}'.format(unsupported)
# return per_folder
##############################################################################
class Quantity(object):
"""
The Quantity object is the main Quantipy aggregation engine.
Consists of a link's data matrix representation and sectional definition
of weight vector (wv), x-codes section (xsect) and y-codes section
(ysect). The instance methods handle creation, retrieval and manipulation
of the data input matrices and section definitions as well as the majority
of statistical calculations.
"""
# -------------------------------------------------
# Instance initialization
# -------------------------------------------------
def __init__(self, link, weight=None, use_meta=False, base_all=False):
# Collect information on wv, x- and y-section
self._uses_meta = use_meta
self.ds = self._convert_to_dataset(link)
self.d = self._data
self.base_all = base_all
self._dataidx = link.get_data().index
if self._uses_meta:
self.meta = self._meta
if list(self.meta().values()) == [None] * len(list(self.meta().values())):
self._uses_meta = False
self.meta = None
else:
self.meta = None
self._cache = link.get_cache()
self.f = link.filter
self.x = link.x
self.y = link.y
self.w = weight if weight is not None else '@1'
self.is_weighted = False
self.type = self._get_type()
if self.type == 'nested':
self.nest_def = Nest(self.y, self.d(), self.meta()).nest()
self._squeezed = False
self.idx_map = None
self.xdef = self.ydef = None
self.matrix = self._get_matrix()
self.is_empty = self.matrix.sum() == 0
self.switched = False
self.factorized = None
self.result = None
self.logical_conditions = []
self.cbase = self.rbase = None
self.comb_x = self.comb_y = None
self.miss_x = self.miss_y = None
self.calc_x = self.calc_y = None
self._has_x_margin = self._has_y_margin = False
def __repr__(self):
if self.result is not None:
return '%s' % (self.result)
else:
return 'Quantity - x: {}, xdef: {} y: {}, ydef: {}, w: {}'.format(
self.x, self.xdef, self.y, self.ydef, self.w)
# -------------------------------------------------
# Matrix creation and retrieval
# -------------------------------------------------
def _convert_to_dataset(self, link):
ds = qp.DataSet('')
ds._data = link.stack[link.data_key].data
ds._meta = link.get_meta()
return ds
def _data(self):
return self.ds._data
def _meta(self):
return self.ds._meta
def _get_type(self):
"""
Determine the variable type, which can be "simple", "nested" or "array".
"""
if self._uses_meta:
masks = [self.x, self.y]
if any(mask in list(self.meta()['masks'].keys()) for mask in masks):
mask = {
True: self.x,
False: self.y}.get(self.x in list(self.meta()['masks'].keys()))
if self.meta()['masks'][mask]['type'] == 'array':
if self.x == '@':
self.x, self.y = self.y, self.x
return 'array'
elif '>' in self.y:
return 'nested'
else:
return 'simple'
else:
return 'simple'
def _is_multicode_array(self, mask_element):
return (
self.d()[mask_element].dtype == 'str'
)
def _get_wv(self):
"""
Returns the weight vector of the matrix.
"""
return self.d()[[self.w]].values
def weight(self):
"""
Weight by multiplying the indicator entries with the weight vector.
"""
self.matrix *= np.atleast_3d(self.wv)
# if self.is_weighted:
# self.matrix[:, 1:, 1:] *= np.atleast_3d(self.wv)
# else:
# self.matrix *= np.atleast_3d(self.wv)
# self.is_weighted = True
return None
def unweight(self):
"""
Remove any weighting by dividing the matrix by itself.
"""
self.matrix /= self.matrix
# self.matrix[:, 1:, 1:] /= self.matrix[:, 1:, 1:]
# self.is_weighted = False
return None
def _get_total(self):
"""
Return a vector of 1s for the matrix.
"""
return self.d()[['@1']].values
def _copy(self):
"""
Copy the Quantity instance, i.e. its data matrix, into a new object.
"""
m_copy = np.empty_like(self.matrix)
m_copy[:] = self.matrix
c = copy.copy(self)
c.matrix = m_copy
return c
def _get_response_codes(self, var):
"""
Query the meta-specified code values for a meta-using Quantity.
"""
if self.type == 'array':
rescodes = [v['value'] for v in self.meta()['lib']['values'][var]]
else:
values = emulate_meta(
self.meta(), self.meta()['columns'][var].get('values', None))
rescodes = [v['value'] for v in values]
return rescodes
def _get_response_texts(self, var, text_key=None):
"""
Query the meta-specified text values for a meta-using Quantity.
"""
if text_key is None: text_key = 'main'
if self.type == 'array':
restexts = [v[text_key] for v in self.meta()['lib']['values'][var]]
else:
values = emulate_meta(
self.meta(), self.meta()['columns'][var].get('values', None))
restexts = [v['text'][text_key] for v in values]
return restexts
def _switch_axes(self):
"""
"""
if self.switched:
self.switched = False
self.matrix = self.matrix.swapaxes(1, 2)
else:
self.switched = True
self.matrix = self.matrix.swapaxes(2, 1)
self.xdef, self.ydef = self.ydef, self.xdef
self._x_indexers, self._y_indexers = self._y_indexers, self._x_indexers
self.comb_x, self.comb_y = self.comb_y, self.comb_x
self.miss_x, self.miss_y = self.miss_y, self.miss_x
return self
def _reset(self):
for prop in list(self.__dict__.keys()):
if prop in ['_uses_meta', 'base_all', '_dataidx', 'meta', '_cache',
'd', 'idx_map']:
pass
elif prop in ['_squeezed', 'switched']:
self.__dict__[prop] = False
else:
self.__dict__[prop] = None
self.result = None
return None
def swap(self, var, axis='x', inplace=True):
"""
Change the Quantity's x- or y-axis keeping filter and weight setup.
All edits and aggregation results will be removed during the swap.
Parameters
----------
var : str
New variable's name used in axis swap.
axis : {'x', 'y'}, default ``'x'``
The axis to swap.
inplace : bool, default ``True``
Whether to modify the Quantity inplace or return a new instance.
Returns
-------
swapped : New Quantity instance with exchanged x- or y-axis.
"""
if axis == 'x':
x = var
y = self.y
else:
x = self.x
y = var
f, w = self.f, self.w
if inplace:
swapped = self
else:
swapped = self._copy()
swapped._reset()
swapped.x, swapped.y = x, y
swapped.f, swapped.w = f, w
swapped.type = swapped._get_type()
swapped._get_matrix()
if not inplace:
return swapped
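# Example (illustrative; 'q2' is an assumed variable in the source data):
#   swapped = q.swap('q2', axis='x', inplace=False)  # same filter/weight, new x-axis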
def rescale(self, scaling, drop=False):
"""
Modify the object's ``xdef`` property reflecting new value definitions.
Parameters
----------
scaling : dict
Mapping of old_code: new_code, given as int or float values.
drop : bool, default False
If True, codes not included in the scaling dict will be excluded.
Returns
-------
self
"""
proper_scaling = {old_code: new_code for old_code, new_code
in list(scaling.items()) if old_code in self.xdef}
xdef_ref = [proper_scaling[code] if code in list(proper_scaling.keys())
else code for code in self.xdef]
if drop:
to_drop = [code for code in self.xdef if code not in
list(proper_scaling.keys())]
self.exclude(to_drop, axis='x')
self.xdef = xdef_ref
return self
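# Example (illustrative): map an assumed 1-3 scale onto 0/50/100 and drop other codes:
#   q.rescale({1: 0, 2: 50, 3: 100}, drop=True)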
def exclude(self, codes, axis='x'):
"""
Wrapper for _missingfy(...keep_codes=False, ..., keep_base=False, ...)
Excludes specified codes from aggregation.
"""
self._missingfy(codes, axis=axis, keep_base=False, inplace=True)
return self
def limit(self, codes, axis='x'):
"""
Wrapper for _missingfy(...keep_codes=True, ..., keep_base=True, ...)
Restrict the data matrix entries to contain the specified codes only.
"""
self._missingfy(codes, axis=axis, keep_codes=True, keep_base=True,
inplace=True)
return self
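# Example (illustrative; the codes are assumed):
#   q.exclude([98, 99], axis='x')       # treat 98/99 as missing
#   q.limit([1, 2, 3, 4, 5], axis='x')  # or: keep only this code range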
def filter(self, condition, keep_base=True, inplace=False):
"""
Use a Quantipy conditional expression to filter the data matrix entries.
"""
if inplace:
filtered = self
else:
filtered = self._copy()
qualified_rows = self._get_logic_qualifiers(condition)
valid_rows = self.idx_map[self.idx_map[:, 0] == 1][:, 1]
filter_idx = np.in1d(valid_rows, qualified_rows)
if keep_base:
filtered.matrix[~filter_idx, 1:, :] = np.NaN
else:
filtered.matrix[~filter_idx, :, :] = np.NaN
if not inplace:
return filtered
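# Example (illustrative sketch; 'gender' is an assumed column and has_any() is the
# Quantipy logic helper):
#   males_only = q.filter({'gender': has_any([1])}, keep_base=True, inplace=False)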
def _get_logic_qualifiers(self, condition):
if not isinstance(condition, dict):
column = self.x
logic = condition
else:
column = list(condition.keys())[0]
logic = list(condition.values())[0]
idx, logical_expression = get_logic_index(self.d()[column], logic, self.d())
logical_expression = logical_expression.split(':')[0]
if not column == self.x:
logical_expression = logical_expression.replace('x[', column+'[')
self.logical_conditions.append(logical_expression)
return idx
def _missingfy(self, codes, axis='x', keep_codes=False, keep_base=True,
indices=False, inplace=True):
"""
Clean the matrix of code entries, preserving or modifying the weight vector.
Parameters
----------
codes : list
A list of codes to be considered in cleaning.
axis : {'x', 'y'}, default 'x'
The axis to clean codes on. Refers to the Link object's x- and y-
axes.
keep_codes : bool, default False
Controls whether the passed codes are kept or erased from the
Quantity matrix data entries.
keep_base: bool, default True
Controls whether the weight vector is set to np.NaN alongside
the x-section rows or remains unmodified.
indices: bool, default False
If ``True``, the data matrix indices of the corresponding codes
will be returned as well.
inplace : bool, default True
Will overwrite self.matrix with the missingfied matrix by default.
If ``False``, the method will return a new np.array with the
modified entries.
Returns
-------
self or numpy.array (and optionally a list of int when ``indices=True``)
Either a new matrix is returned as numpy.array or the ``matrix``
property is modified inplace.
"""
if inplace:
missingfied = self
else:
missingfied = self._copy()
if axis == 'y' and self.y == '@' and not self.type == 'array':
return self
elif axis == 'y' and self.type == 'array':
ni_err = 'Cannot missingfy array mask element sections!'
raise NotImplementedError(ni_err)
else:
if axis == 'y':
missingfied._switch_axes()
mis_ix = missingfied._get_drop_idx(codes, keep_codes)
mis_ix = [code + 1 for code in mis_ix]
if mis_ix is not None:
for ix in mis_ix:
np.place(missingfied.matrix[:, ix],
missingfied.matrix[:, ix] > 0, np.NaN)
if not keep_base:
if axis == 'x':
self.miss_x = codes
else:
self.miss_y = codes
if self.type == 'array':
mask = np.nansum(missingfied.matrix[:, missingfied._x_indexers],
axis=1, keepdims=True)
mask /= mask
mask = mask > 0
else:
mask = np.nansum(np.sum(missingfied.matrix,
axis=1, keepdims=False),
axis=1, keepdims=True) > 0
missingfied.matrix[~mask] = np.NaN
if axis == 'y':
missingfied._switch_axes()
if inplace:
self.matrix = missingfied.matrix
if indices:
return mis_ix
else:
if indices:
return missingfied, mis_ix
else:
return missingfied
def _organize_global_missings(self, missings):
hidden = [c for c in list(missings.keys()) if missings[c] == 'hidden']
excluded = [c for c in list(missings.keys()) if missings[c] == 'excluded']
shown = [c for c in list(missings.keys()) if missings[c] == 'shown']
return hidden, excluded, shown
def _organize_stats_missings(self, missings):
excluded = [c for c in list(missings.keys())
if missings[c] in ['d.excluded', 'excluded']]
return excluded
def _autodrop_stats_missings(self):
if self.x == '@':
pass
elif self.ds._has_missings(self.x):
missings = self.ds._get_missings(self.x)
to_drop = self._organize_stats_missings(missings)
self.exclude(to_drop)
else:
pass
return None
def _clean_from_global_missings(self):
if self.x == '@':
pass
elif self.ds._has_missings(self.x):
missings = self.ds._get_missings(self.x)
hidden, excluded, shown = self._organize_global_missings(missings)
if excluded:
excluded_codes = excluded
excluded_idxer = self._missingfy(excluded, keep_base=False,
indices=True)
else:
excluded_codes, excluded_idxer = [], []
if hidden:
hidden_codes = hidden
hidden_idxer = self._get_drop_idx(hidden, keep=False)
hidden_idxer = [code + 1 for code in hidden_idxer]
else:
hidden_codes, hidden_idxer = [], []
dropped_codes = excluded_codes + hidden_codes
dropped_codes_idxer = excluded_idxer + hidden_idxer
self._x_indexers = [x_idx for x_idx in self._x_indexers
if x_idx not in dropped_codes_idxer]
self.matrix = self.matrix[:, [0] + self._x_indexers]
self.xdef = [x_c for x_c in self.xdef if x_c not in dropped_codes]
else:
pass
return None
def _get_drop_idx(self, codes, keep):
"""
Produces a list of indices referring to the given input matrix's axes
sections in order to erase data entries.
Parameters
----------
codes : list
Data codes that should be dropped from or kept in the matrix.
keep : boolean
            Controls if the passed code definition is interpreted as
"codes to keep" or "codes to drop".
Returns
-------
drop_idx : list
List of x section matrix indices.
"""
if codes is None:
return None
else:
if keep:
return [self.xdef.index(code) for code in self.xdef
if code not in codes]
else:
return [self.xdef.index(code) for code in codes
if code in self.xdef]
def group(self, groups, axis='x', expand=None, complete=False):
"""
        Build simple or logical net vectors, optionally keeping originating codes.
Parameters
----------
groups : list, dict of lists or logic expression
            The group/net code definition(s) in form of...
* a simple list: ``[1, 2, 3]``
* a dict of list: ``{'grp A': [1, 2, 3], 'grp B': [4, 5, 6]}``
* a logical expression: ``not_any([1, 2])``
axis : {``'x'``, ``'y'``}, default ``'x'``
The axis to group codes on.
expand : {None, ``'before'``, ``'after'``}, default ``None``
If ``'before'``, the codes that are grouped will be kept and placed
before the grouped aggregation; vice versa for ``'after'``. Ignored
on logical expressions found in ``groups``.
complete : bool, default False
If True, codes that define the Link on the given ``axis`` but are
            not present in the ``groups`` definition(s) will be placed in their
natural position within the aggregation, respecting the value of
``expand``.
Returns
-------
None
"""
# check validity and clean combine instructions
if axis == 'y' and self.type == 'array':
ni_err_array = 'Array mask element sections cannot be combined.'
raise NotImplementedError(ni_err_array)
elif axis == 'y' and self.y == '@':
val_err = 'Total link has no y-axis codes to combine.'
raise ValueError(val_err)
grp_def = self._organize_grp_def(groups, expand, complete, axis)
combines = []
names = []
# generate the net vectors (+ possible expanded originating codes)
for grp in grp_def:
name, group, exp, logical = grp[0], grp[1], grp[2], grp[3]
one_code = len(group) == 1
if one_code and not logical:
vec = self._slice_vec(group[0], axis=axis)
elif not logical and not one_code:
vec, idx = self._grp_vec(group, axis=axis)
else:
vec = self._logic_vec(group)
if axis == 'y':
self._switch_axes()
if exp is not None:
m_idx = [ix for ix in self._x_indexers if ix not in idx]
m_idx = self._sort_indexer_as_codes(m_idx, group)
if exp == 'after':
names.extend(name)
names.extend([c for c in group])
combines.append(
np.concatenate([vec, self.matrix[:, m_idx]], axis=1))
else:
names.extend([c for c in group])
names.extend(name)
combines.append(
np.concatenate([self.matrix[:, m_idx], vec], axis=1))
else:
names.extend(name)
combines.append(vec)
if axis == 'y':
self._switch_axes()
# re-construct the combined data matrix
combines = np.concatenate(combines, axis=1)
if axis == 'y':
self._switch_axes()
combined_matrix = np.concatenate([self.matrix[:, [0]],
combines], axis=1)
if axis == 'y':
combined_matrix = combined_matrix.swapaxes(1, 2)
self._switch_axes()
# update the sectional information
new_sect_def = list(range(0, combined_matrix.shape[1] - 1))
if axis == 'x':
self.xdef = new_sect_def
self._x_indexers = self._get_x_indexers()
self.comb_x = names
else:
self.ydef = new_sect_def
self._y_indexers = self._get_y_indexers()
self.comb_y = names
self.matrix = combined_matrix
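    # Hypothetical usage sketch (added for illustration, not part of the
    # original module): assuming `q` is a qp.Quantity whose x-axis carries
    # the codes 1-5 plus the missing codes 98/99, the accepted ``groups``
    # notations are:
    #
    #     q.group([1, 2, 3])                                   # simple net
    #     q.group([{'Top2': [4, 5]}, {'Bottom2': [1, 2]}],
    #             expand='after', complete=True)               # named nets
    #     q.group(not_any([98, 99]))                           # logical net
    #
    # ``expand``/``complete`` keep or re-insert the originating codes around
    # the net vectors; both are ignored for logical expressions.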
def _slice_vec(self, code, axis='x'):
'''
'''
if axis == 'x':
code_idx = self.xdef.index(code) + 1
else:
code_idx = self.ydef.index(code) + 1
if axis == 'x':
m_slice = self.matrix[:, [code_idx]]
else:
self._switch_axes()
m_slice = self.matrix[:, [code_idx]]
self._switch_axes()
return m_slice
def _grp_vec(self, codes, axis='x'):
netted, idx = self._missingfy(codes=codes, axis=axis,
keep_codes=True, keep_base=True,
indices=True, inplace=False)
if axis == 'y':
netted._switch_axes()
net_vec = np.nansum(netted.matrix[:, netted._x_indexers],
axis=1, keepdims=True)
net_vec /= net_vec
return net_vec, idx
def _logic_vec(self, condition):
"""
Create net vector of qualified rows based on passed condition.
"""
filtered = self.filter(condition=condition, inplace=False)
net_vec = np.nansum(filtered.matrix[:, self._x_indexers], axis=1,
keepdims=True)
net_vec /= net_vec
return net_vec
def _grp_type(self, grp_def):
if isinstance(grp_def, list):
if not isinstance(grp_def[0], (int, float)):
return 'block'
else:
return 'list'
elif isinstance(grp_def, tuple):
return 'logical'
elif isinstance(grp_def, dict):
return 'wildcard'
def _add_unused_codes(self, grp_def_list, axis):
'''
'''
query_codes = self.xdef if axis == 'x' else self.ydef
frame_lookup = {c: [[c], [c], None, False] for c in query_codes}
frame = [[code] for code in query_codes]
for grpdef_idx, grpdef in enumerate(grp_def_list):
for code in grpdef[1]:
if [code] in frame:
if grpdef not in frame:
frame[frame.index([code])] = grpdef
else:
frame[frame.index([code])] = '-'
frame = [code for code in frame if not code == '-']
for code in frame:
if code[0] in list(frame_lookup.keys()):
frame[frame.index([code[0]])] = frame_lookup[code[0]]
return frame
def _organize_grp_def(self, grp_def, method_expand, complete, axis):
"""
Sanitize a combine instruction list (of dicts): names, codes, expands.
"""
organized_def = []
codes_used = []
any_extensions = complete
any_logical = False
if method_expand is None and complete:
method_expand = 'before'
if not self._grp_type(grp_def) == 'block':
grp_def = [{'net': grp_def, 'expand': method_expand}]
for grp in grp_def:
if any(isinstance(val, (tuple, dict)) for val in list(grp.values())):
if complete:
ni_err = ('Logical expr. unsupported when complete=True. '
'Only list-type nets/groups can be completed.')
raise NotImplementedError(ni_err)
if 'expand' in list(grp.keys()):
del grp['expand']
expand = None
logical = True
else:
if 'expand' in list(grp.keys()):
grp = copy.deepcopy(grp)
expand = grp['expand']
if expand is None and complete:
expand = 'before'
del grp['expand']
else:
expand = method_expand
logical = False
organized_def.append([list(grp.keys()), list(grp.values())[0], expand, logical])
if expand:
any_extensions = True
if logical:
any_logical = True
codes_used.extend(list(grp.values())[0])
if not any_logical:
if len(set(codes_used)) != len(codes_used) and any_extensions:
ni_err_extensions = ('Same codes in multiple groups unsupported '
'with expand and/or complete =True.')
raise NotImplementedError(ni_err_extensions)
if complete:
return self._add_unused_codes(organized_def, axis)
else:
return organized_def
def _force_to_nparray(self):
"""
Convert the aggregation result into its numpy array equivalent.
"""
if isinstance(self.result, pd.DataFrame):
self.result = self.result.values
return True
else:
return False
def _attach_margins(self):
"""
Force margins back into the current Quantity.result if none are found.
"""
if not self._res_is_stat():
values = self.result
if not self._has_y_margin and not self.y == '@':
margins = False
values = np.concatenate([self.rbase[1:, :], values], 1)
else:
margins = True
if not self._has_x_margin:
margins = False
values = np.concatenate([self.cbase, values], 0)
else:
margins = True
self.result = values
return margins
else:
return False
def _organize_expr_def(self, expression, axis):
"""
"""
# Prepare expression parts and lookups for indexing the agg. result
val1, op, val2 = expression[0], expression[1], expression[2]
if self._res_is_stat():
idx_c = [self.current_agg]
offset = 0
else:
if axis == 'x':
idx_c = self.xdef if not self.comb_x else self.comb_x
else:
idx_c = self.ydef if not self.comb_y else self.comb_y
offset = 1
# Test expression validity and find np.array indices / prepare scalar
# values of the expression
idx_err = '"{}" not found in {}-axis.'
# [1] input is 1. scalar, 2. vector from the agg. result
if isinstance(val1, list):
if not val2 in idx_c:
raise IndexError(idx_err.format(val2, axis))
val1 = val1[0]
val2 = idx_c.index(val2) + offset
expr_type = 'scalar_1'
# [2] input is 1. vector from the agg. result, 2. scalar
elif isinstance(val2, list):
if not val1 in idx_c:
raise IndexError(idx_err.format(val1, axis))
val1 = idx_c.index(val1) + offset
val2 = val2[0]
expr_type = 'scalar_2'
# [3] input is two vectors from the agg. result
elif not any(isinstance(val, list) for val in [val1, val2]):
if not val1 in idx_c:
raise IndexError(idx_err.format(val1, axis))
if not val2 in idx_c:
raise IndexError(idx_err.format(val2, axis))
val1 = idx_c.index(val1) + offset
val2 = idx_c.index(val2) + offset
expr_type = 'vectors'
return val1, op, val2, expr_type, idx_c
@staticmethod
def constant(num):
return [num]
def calc(self, expression, axis='x', result_only=False):
"""
        Compute (simple) aggregation-level arithmetic.
"""
unsupported = ['cbase', 'rbase', 'summary', 'x_sum', 'y_sum']
if self.result is None:
raise ValueError('No aggregation to base calculation on.')
elif self.current_agg in unsupported:
ni_err = 'Aggregation type "{}" not supported.'
raise NotImplementedError(ni_err.format(self.current_agg))
elif axis not in ['x', 'y']:
raise ValueError('Invalid axis parameter: {}'.format(axis))
is_df = self._force_to_nparray()
has_margin = self._attach_margins()
values = self.result
expr_name = list(expression.keys())[0]
if axis == 'x':
self.calc_x = expr_name
else:
self.calc_y = expr_name
values = values.T
expr = list(expression.values())[0]
v1, op, v2, exp_type, index_codes = self._organize_expr_def(expr, axis)
# ====================================================================
# TODO: generalize this calculation part so that it can "parse"
# arbitrary calculation rules given as nested or concatenated
# operators/codes sequences.
if exp_type == 'scalar_1':
val1, val2 = v1, values[[v2], :]
elif exp_type == 'scalar_2':
val1, val2 = values[[v1], :], v2
elif exp_type == 'vectors':
val1, val2 = values[[v1], :], values[[v2], :]
calc_res = op(val1, val2)
# ====================================================================
if axis == 'y':
calc_res = calc_res.T
ap_axis = 0 if axis == 'x' else 1
if result_only:
if not self._res_is_stat():
self.result = np.concatenate([self.result[[0], :], calc_res],
ap_axis)
else:
self.result = calc_res
else:
self.result = np.concatenate([self.result, calc_res], ap_axis)
if axis == 'x':
self.calc_x = index_codes + [self.calc_x]
else:
self.calc_y = index_codes + [self.calc_y]
self.cbase = self.result[[0], :]
if self.type in ['simple', 'nested']:
self.rbase = self.result[:, [0]]
else:
self.rbase = None
if not self._res_is_stat():
self.current_agg = 'calc'
self._organize_margins(has_margin)
else:
self.current_agg = 'calc'
if is_df:
self.to_df()
return self
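    # Hypothetical usage sketch (added for illustration, not part of the
    # original module): ``expression`` is a single-entry dict mapping the new
    # row/column label to a (value, operator, value) triple; scalars are
    # wrapped via ``constant()`` and the operator functions (add, sub, ...)
    # are the ones already imported at module level. Assuming a counts
    # aggregation over the x-codes 1-3 (independent examples):
    #
    #     q.count(as_df=False)
    #     q.calc({'1 plus 2': (1, add, 2)}, axis='x')          # two vectors
    #     q.calc({'3 minus 10': (3, sub, q.constant(10))})     # vector/scalar
    #
    # The calculation row is appended to ``self.result`` unless
    # ``result_only=True`` is passed.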
def count(self, axis=None, raw_sum=False, margin=True, as_df=True):
"""
Count entries over all cells or per axis margin.
Parameters
----------
        axis : {None, 'x', 'y'}, default None
When axis is None, the frequency of all cells from the uni- or
multivariate distribution is presented. If the axis is specified
to be either 'x' or 'y' the margin per axis becomes the resulting
aggregation.
raw_sum : bool, default False
If True will perform a simple summation over the cells given the
axis parameter. This ignores net counting of qualifying answers in
favour of summing over all answers given when considering margins.
        margin : bool, default True
            Controls whether the margins of the aggregation result are shown.
            This also applies to margin aggregations themselves, since they
            contain a margin (in form of the total number of cases) as well.
as_df : bool, default True
Controls whether the aggregation is transformed into a Quantipy-
multiindexed (following the Question/Values convention)
pandas.DataFrame or will be left in its numpy.array format.
Returns
-------
self
Passes a pandas.DataFrame or numpy.array of cell or margin counts
to the ``result`` property.
"""
if axis is None and raw_sum:
raise ValueError('Cannot calculate raw sum without axis.')
if axis is None:
self.current_agg = 'freq'
elif axis == 'x':
self.current_agg = 'cbase' if not raw_sum else 'x_sum'
elif axis == 'y':
self.current_agg = 'rbase' if not raw_sum else 'y_sum'
if not self.w == '@1':
self.weight()
if not self.is_empty or self._uses_meta:
counts = np.nansum(self.matrix, axis=0)
else:
counts = self._empty_result()
self.cbase = counts[[0], :]
if self.type in ['simple', 'nested']:
self.rbase = counts[:, [0]]
else:
self.rbase = None
if axis is None:
self.result = counts
elif axis == 'x':
if not raw_sum:
self.result = counts[[0], :]
else:
self.result = np.nansum(counts[1:, :], axis=0, keepdims=True)
elif axis == 'y':
if not raw_sum:
self.result = counts[:, [0]]
else:
if self.x == '@' or self.y == '@':
self.result = counts[:, [0]]
else:
self.result = np.nansum(counts[:, 1:], axis=1, keepdims=True)
self._organize_margins(margin)
if as_df:
self.to_df()
self.unweight()
return self
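    # Hypothetical usage sketch (added for illustration, not part of the
    # original module): assuming `q` is a (possibly weighted) qp.Quantity,
    # the calls below produce the cell counts, the column bases and a raw
    # column sum respectively:
    #
    #     q.count()                              # full frequency table
    #     q.count(axis='x')                      # column base ('cbase')
    #     q.count(axis='x', raw_sum=True)        # 'x_sum' over all answers
    #
    # Passing ``as_df=False`` keeps the raw numpy.array in ``q.result``.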
def _empty_result(self):
if self._res_is_stat() or self.current_agg == 'summary':
self.factorized = 'x'
xdim = 1 if self._res_is_stat() else 8
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
else:
if self.xdef is not None:
if len(self.xdef) == 0:
xdim = 2
else:
xdim = len(self.xdef) + 1
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
elif self.xdef is None:
xdim = 2
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
return np.zeros((xdim, ydim))
def _effective_n(self, axis=None, margin=True):
self.weight()
effective = (np.nansum(self.matrix, axis=0)**2 /
np.nansum(self.matrix**2, axis=0))
self.unweight()
start_on = 0 if margin else 1
if axis is None:
return effective[start_on:, start_on:]
elif axis == 'x':
return effective[[0], start_on:]
else:
return effective[start_on:, [0]]
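    # Note (added for clarity, not part of the original module): the
    # effective base computed above is the Kish-style quantity
    #     n_eff = (sum of weights)**2 / (sum of squared weights),
    # e.g. two cases weighted 2.0 each give 4**2 / 8 = 2 effective cases,
    # while four unit-weighted cases give 4**2 / 4 = 4.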
def summarize(self, stat='summary', axis='x', margin=True, as_df=True):
"""
Calculate distribution statistics across the given axis.
Parameters
----------
        stat : {'summary', 'mean', 'median', 'var', 'stddev', 'sem', 'varcoeff',
'min', 'lower_q', 'upper_q', 'max'}, default 'summary'
The measure to calculate. Defaults to a summary output of the most
important sample statistics.
axis : {'x', 'y'}, default 'x'
The axis which is reduced in the aggregation, e.g. column vs. row
means.
margin : bool, default True
Controls whether statistic(s) of the marginal distribution are
shown.
as_df : bool, default True
Controls whether the aggregation is transformed into a Quantipy-
multiindexed (following the Question/Values convention)
pandas.DataFrame or will be left in its numpy.array format.
Returns
-------
self
Passes a pandas.DataFrame or numpy.array of the descriptive (summary)
statistic(s) to the ``result`` property.
"""
self.current_agg = stat
if self.is_empty:
self.result = self._empty_result()
else:
self._autodrop_stats_missings()
if stat == 'summary':
stddev, mean, base = self._dispersion(axis, measure='sd',
_return_mean=True,
_return_base=True)
self.result = np.concatenate([
base, mean, stddev,
self._min(axis),
self._percentile(perc=0.25),
self._percentile(perc=0.50),
self._percentile(perc=0.75),
self._max(axis)
], axis=0)
elif stat == 'mean':
self.result = self._means(axis)
elif stat == 'var':
self.result = self._dispersion(axis, measure='var')
elif stat == 'stddev':
self.result = self._dispersion(axis, measure='sd')
elif stat == 'sem':
self.result = self._dispersion(axis, measure='sem')
elif stat == 'varcoeff':
self.result = self._dispersion(axis, measure='varcoeff')
elif stat == 'min':
self.result = self._min(axis)
elif stat == 'lower_q':
self.result = self._percentile(perc=0.25)
elif stat == 'median':
self.result = self._percentile(perc=0.5)
elif stat == 'upper_q':
self.result = self._percentile(perc=0.75)
elif stat == 'max':
self.result = self._max(axis)
self._organize_margins(margin)
if as_df:
self.to_df()
return self
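    # Hypothetical usage sketch (added for illustration, not part of the
    # original module): assuming `q` is a qp.Quantity over a numeric or
    # single-coded x-variable:
    #
    #     q.summarize()                           # 8-row summary block
    #     q.summarize(stat='mean', margin=False)  # column means only
    #     q.summarize(stat='median', as_df=False)
    #
    # Codes flagged 'd.excluded'/'excluded' in the meta are dropped
    # automatically via _autodrop_stats_missings() before computing.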
def _factorize(self, axis='x', inplace=True):
self.factorized = axis
if inplace:
factorized = self
else:
factorized = self._copy()
if axis == 'y':
factorized._switch_axes()
np.copyto(factorized.matrix[:, 1:, :],
np.atleast_3d(factorized.xdef),
where=factorized.matrix[:, 1:, :]>0)
if not inplace:
return factorized
def _means(self, axis, _return_base=False):
fact = self._factorize(axis=axis, inplace=False)
if not self.w == '@1':
fact.weight()
fact_prod = np.nansum(fact.matrix, axis=0)
fact_prod_sum = np.nansum(fact_prod[1:, :], axis=0, keepdims=True)
bases = fact_prod[[0], :]
means = fact_prod_sum/bases
if axis == 'y':
self._switch_axes()
means = means.T
bases = bases.T
if _return_base:
return means, bases
else:
return means
def _dispersion(self, axis='x', measure='sd', _return_mean=False,
_return_base=False):
"""
Extracts measures of dispersion from the incoming distribution of
        X vs. Y. Can also return the arithmetic mean on request. Supported
        dispersion measures are standard deviation, variance, coefficient of
        variation and standard error of the mean.
"""
means, bases = self._means(axis, _return_base=True)
unbiased_n = bases - 1
self.unweight()
factorized = self._factorize(axis, inplace=False)
factorized.matrix[:, 1:] -= means
factorized.matrix[:, 1:] *= factorized.matrix[:, 1:, :]
if not self.w == '@1':
factorized.weight()
diff_sqrt = np.nansum(factorized.matrix[:, 1:], axis=1)
disp = np.nansum(diff_sqrt/unbiased_n, axis=0, keepdims=True)
disp[disp <= 0] = np.NaN
disp[np.isinf(disp)] = np.NaN
if measure == 'sd':
disp = np.sqrt(disp)
elif measure == 'sem':
disp = np.sqrt(disp) / np.sqrt((unbiased_n + 1))
elif measure == 'varcoeff':
disp = np.sqrt(disp) / means
self.unweight()
if _return_mean and _return_base:
return disp, means, bases
elif _return_mean:
return disp, means
elif _return_base:
return disp, bases
else:
return disp
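    # Note (added for clarity, not part of the original module): the value
    # returned above is the (weighted) unbiased variance
    #     var = sum(w * (x - mean)**2) / (base - 1),
    # from which 'sd' takes the square root, 'sem' divides sd by sqrt(base)
    # and 'varcoeff' divides sd by the mean.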
def _max(self, axis='x'):
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(factorized.matrix[:, 1:, :], axis=1)
return np.nanmax(vals, axis=0, keepdims=True)
def _min(self, axis='x'):
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(factorized.matrix[:, 1:, :], axis=1)
if 0 not in factorized.xdef: np.place(vals, vals == 0, np.inf)
return np.nanmin(vals, axis=0, keepdims=True)
def _percentile(self, axis='x', perc=0.5):
"""
        Computes percentiles from the incoming distribution of X vs. Y and the
        requested percentile value. The implementation mirrors the algorithm
        used in SPSS Dimensions and the EXAMINE procedure in SPSS Statistics.
        It is based on the percentile definition #6 (adjusted for survey
        weights) in:
        Hyndman, R. J. and Fan, Y. (1996) -
"Sample Quantiles in Statistical Packages",
The American Statistician, 50, No. 4, 361-365.
Parameters
----------
axis : {'x', 'y'}, default 'x'
The axis which is reduced in the aggregation, i.e. column vs. row
medians.
perc : float, default 0.5
Defines the percentile to be computed. Defaults to 0.5,
the sample median.
Returns
-------
percs : np.array
Numpy array storing percentile values.
"""
percs = []
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(np.nansum(factorized.matrix[:, 1:, :], axis=1,
keepdims=True), axis=1)
weights = (vals/vals)*self.wv
for shape_i in range(0, vals.shape[1]):
iter_weights = weights[:, shape_i]
iter_vals = vals[:, shape_i]
mask = ~np.isnan(iter_weights)
iter_weights = iter_weights[mask]
iter_vals = iter_vals[mask]
sorter = np.argsort(iter_vals)
iter_vals = np.take(iter_vals, sorter)
iter_weights = np.take(iter_weights, sorter)
iter_wsum = np.nansum(iter_weights, axis=0)
iter_wcsum = np.cumsum(iter_weights, axis=0)
k = (iter_wsum + 1.0) * perc
if iter_vals.shape[0] == 0:
percs.append(0.00)
elif iter_vals.shape[0] == 1:
percs.append(iter_vals[0])
elif iter_wcsum[0] > k:
wcsum_k = iter_wcsum[0]
percs.append(iter_vals[0])
elif iter_wcsum[-1] <= k:
percs.append(iter_vals[-1])
else:
wcsum_k = iter_wcsum[iter_wcsum <= k][-1]
p_k_idx = np.searchsorted(np.ndarray.flatten(iter_wcsum), wcsum_k)
p_k = iter_vals[p_k_idx]
p_k1 = iter_vals[p_k_idx+1]
w_k1 = iter_weights[p_k_idx+1]
excess = k - wcsum_k
if excess >= 1.0:
percs.append(p_k1)
else:
if w_k1 >= 1.0:
percs.append((1.0-excess)*p_k + excess*p_k1)
else:
percs.append((1.0-(excess/w_k1))*p_k +
(excess/w_k1)*p_k1)
return np.array(percs)[None, :]
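    # Worked example (added for clarity, not part of the original module):
    # for values [10, 20, 30] with unit weights and perc=0.5 the method uses
    # k = (3 + 1) * 0.5 = 2.0; the largest cumulative weight not exceeding k
    # is 2.0 at value 20 and the excess k - 2.0 equals 0, so the weighted
    # median resolves to 20.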
def _organize_margins(self, margin):
if self._res_is_stat():
if self.type == 'array' or self.y == '@' or self.x == '@':
self._has_y_margin = self._has_x_margin = False
else:
if self.factorized == 'x':
if not margin:
self._has_x_margin = False
self._has_y_margin = False
self.result = self.result[:, 1:]
else:
self._has_x_margin = False
self._has_y_margin = True
else:
if not margin:
self._has_x_margin = False
self._has_y_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
if self._res_is_margin():
if self.y == '@' or self.x == '@':
if self.current_agg in ['cbase', 'x_sum']:
self._has_y_margin = self._has_x_margin = False
if self.current_agg in ['rbase', 'y_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
else:
if self.current_agg in ['cbase', 'x_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[:, 1:]
else:
self._has_x_margin = False
self._has_y_margin = True
if self.current_agg in ['rbase', 'y_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
elif self.current_agg in ['freq', 'summary', 'calc']:
if self.type == 'array' or self.y == '@' or self.x == '@':
if not margin:
self.result = self.result[1:, :]
self._has_x_margin = False
self._has_y_margin = False
else:
self._has_x_margin = True
self._has_y_margin = False
else:
if not margin:
self.result = self.result[1:, 1:]
self._has_x_margin = False
self._has_y_margin = False
else:
self._has_x_margin = True
self._has_y_margin = True
else:
pass
def _sort_indexer_as_codes(self, indexer, codes):
mapping = sorted(zip(indexer, codes), key=lambda l: l[1])
return [i[0] for i in mapping]
def _get_y_indexers(self):
if self._squeezed or self.type in ['simple', 'nested']:
if self.ydef is not None:
idxs = list(range(1, len(self.ydef)+1))
return self._sort_indexer_as_codes(idxs, self.ydef)
else:
return [1]
else:
y_indexers = []
xdef_len = len(self.xdef)
zero_based_ys = [idx for idx in range(0, xdef_len)]
for y_no in range(0, len(self.ydef)):
if y_no == 0:
y_indexers.append(zero_based_ys)
else:
y_indexers.append([idx + y_no * xdef_len
for idx in zero_based_ys])
return y_indexers
def _get_x_indexers(self):
if self._squeezed or self.type in ['simple', 'nested']:
idxs = list(range(1, len(self.xdef)+1))
return self._sort_indexer_as_codes(idxs, self.xdef)
else:
x_indexers = []
upper_x_idx = len(self.ydef)
start_x_idx = [len(self.xdef) * offset
for offset in range(0, upper_x_idx)]
for x_no in range(0, len(self.xdef)):
x_indexers.append([idx + x_no for idx in start_x_idx])
return x_indexers
def _squeeze_dummies(self):
"""
Reshape and replace initial 2D dummy matrix into its 3D equivalent.
"""
self.wv = self.matrix[:, [-1]]
sects = []
if self.type == 'array':
x_sections = self._get_x_indexers()
y_sections = self._get_y_indexers()
y_total = np.nansum(self.matrix[:, x_sections], axis=1)
y_total /= y_total
y_total = y_total[:, None, :]
for sect in y_sections:
sect = self.matrix[:, sect]
sects.append(sect)
sects = np.dstack(sects)
self._squeezed = True
sects = np.concatenate([y_total, sects], axis=1)
self.matrix = sects
self._x_indexers = self._get_x_indexers()
self._y_indexers = []
elif self.type in ['simple', 'nested']:
x = self.matrix[:, :len(self.xdef)+1]
y = self.matrix[:, len(self.xdef)+1:-1]
for i in range(0, y.shape[1]):
sects.append(x * y[:, [i]])
sects = np.dstack(sects)
self._squeezed = True
self.matrix = sects
self._x_indexers = self._get_x_indexers()
self._y_indexers = self._get_y_indexers()
#=====================================================================
#THIS CAN SPEED UP PERFOMANCE BY A GOOD AMOUNT BUT STACK-SAVING
#TIME & SIZE WILL SUFFER. WE CAN DEL THE "SQUEEZED" COLLECTION AT
#SAVE STAGE.
#=====================================================================
# self._cache.set_obj(collection='squeezed',
# key=self.f+self.w+self.x+self.y,
# obj=(self.xdef, self.ydef,
# self._x_indexers, self._y_indexers,
# self.wv, self.matrix, self.idx_map))
def _get_matrix(self):
wv = self._cache.get_obj('weight_vectors', self.w)
if wv is None:
wv = self._get_wv()
self._cache.set_obj('weight_vectors', self.w, wv)
total = self._cache.get_obj('weight_vectors', '@1')
if total is None:
total = self._get_total()
self._cache.set_obj('weight_vectors', '@1', total)
if self.type == 'array':
xm, self.xdef, self.ydef = self._dummyfy()
self.matrix = np.concatenate((xm, wv), 1)
else:
if self.y == '@' or self.x == '@':
section = self.x if self.y == '@' else self.y
xm, self.xdef = self._cache.get_obj('matrices', section)
if xm is None:
xm, self.xdef = self._dummyfy(section)
self._cache.set_obj('matrices', section, (xm, self.xdef))
self.ydef = None
self.matrix = np.concatenate((total, xm, total, wv), 1)
else:
xm, self.xdef = self._cache.get_obj('matrices', self.x)
if xm is None:
xm, self.xdef = self._dummyfy(self.x)
self._cache.set_obj('matrices', self.x, (xm, self.xdef))
ym, self.ydef = self._cache.get_obj('matrices', self.y)
if ym is None:
ym, self.ydef = self._dummyfy(self.y)
self._cache.set_obj('matrices', self.y, (ym, self.ydef))
self.matrix = np.concatenate((total, xm, total, ym, wv), 1)
self.matrix = self.matrix[self._dataidx]
self.matrix = self._clean()
self._squeeze_dummies()
self._clean_from_global_missings()
return self.matrix
def _dummyfy(self, section=None):
if section is not None:
# i.e. Quantipy multicode data
if self.d()[section].dtype == 'str' or self.d()[section].dtype == 'object':
section_data = self.d()[section].astype('str').str.get_dummies(';')
if self._uses_meta:
res_codes = self._get_response_codes(section)
section_data.columns = [int(col) for col in section_data.columns]
section_data = section_data.reindex(columns=res_codes)
section_data.replace(np.NaN, 0, inplace=True)
if not self._uses_meta:
section_data.sort_index(axis=1, inplace=True)
# i.e. Quantipy single-coded/numerical data
else:
section_data = pd.get_dummies(self.d()[section])
if self._uses_meta and not self._is_raw_numeric(section):
res_codes = self._get_response_codes(section)
section_data = section_data.reindex(columns=res_codes)
section_data.replace(np.NaN, 0, inplace=True)
section_data.rename(
columns={
col: int(col)
if float(col).is_integer()
else col
for col in section_data.columns
},
inplace=True)
return section_data.values, section_data.columns.tolist()
elif section is None and self.type == 'array':
a_i = [i['source'].split('@')[-1] for i in
self.meta()['masks'][self.x]['items']]
a_res = self._get_response_codes(self.x)
dummies = []
if self._is_multicode_array(a_i[0]):
for i in a_i:
i_dummy = self.d()[i].str.get_dummies(';')
i_dummy.columns = [int(col) for col in i_dummy.columns]
dummies.append(i_dummy.reindex(columns=a_res))
else:
for i in a_i:
dummies.append(pd.get_dummies(self.d()[i]).reindex(columns=a_res))
a_data = pd.concat(dummies, axis=1)
return a_data.values, a_res, a_i
def _clean(self):
"""
Drop empty sectional rows from the matrix.
"""
mat = self.matrix.copy()
mat_indexer = np.expand_dims(self._dataidx, 1)
if not self.type == 'array':
xmask = (np.nansum(mat[:, 1:len(self.xdef)+1], axis=1) > 0)
if self.ydef is not None:
if self.base_all:
ymask = (np.nansum(mat[:, len(self.xdef)+1:-1], axis=1) > 0)
else:
ymask = (np.nansum(mat[:, len(self.xdef)+2:-1], axis=1) > 0)
self.idx_map = np.concatenate(
[np.expand_dims(xmask & ymask, 1), mat_indexer], axis=1)
return mat[xmask & ymask]
else:
self.idx_map = np.concatenate(
[np.expand_dims(xmask, 1), mat_indexer], axis=1)
return mat[xmask]
else:
mask = (np.nansum(mat[:, :-1], axis=1) > 0)
self.idx_map = np.concatenate(
[np.expand_dims(mask, 1), mat_indexer], axis=1)
return mat[mask]
def _is_raw_numeric(self, var):
return self.meta()['columns'][var]['type'] in ['int', 'float']
def _res_from_count(self):
return self._res_is_margin() or self.current_agg == 'freq'
def _res_from_summarize(self):
return self._res_is_stat() or self.current_agg == 'summary'
def _res_is_margin(self):
return self.current_agg in ['tbase', 'cbase', 'rbase', 'x_sum', 'y_sum']
def _res_is_stat(self):
return self.current_agg in ['mean', 'min', 'max', 'varcoeff', 'sem',
'stddev', 'var', 'median', 'upper_q',
'lower_q']
def to_df(self):
if self.current_agg == 'freq':
if not self.comb_x:
self.x_agg_vals = self.xdef
else:
self.x_agg_vals = self.comb_x
if not self.comb_y:
self.y_agg_vals = self.ydef
else:
self.y_agg_vals = self.comb_y
elif self.current_agg == 'calc':
if self.calc_x:
self.x_agg_vals = self.calc_x
self.y_agg_vals = self.ydef if not self.comb_y else self.comb_y
else:
self.x_agg_vals = self.xdef if not self.comb_x else self.comb_x
self.y_agg_vals = self.calc_y
elif self.current_agg == 'summary':
summary_vals = ['mean', 'stddev', 'min', '25%',
'median', '75%', 'max']
self.x_agg_vals = summary_vals
self.y_agg_vals = self.ydef
elif self.current_agg in ['x_sum', 'cbase']:
self.x_agg_vals = 'All' if self.current_agg == 'cbase' else 'sum'
self.y_agg_vals = self.ydef
elif self.current_agg in ['y_sum', 'rbase']:
self.x_agg_vals = self.xdef
self.y_agg_vals = 'All' if self.current_agg == 'rbase' else 'sum'
elif self._res_is_stat():
if self.factorized == 'x':
self.x_agg_vals = self.current_agg
self.y_agg_vals = self.ydef if not self.comb_y else self.comb_y
else:
self.x_agg_vals = self.xdef if not self.comb_x else self.comb_x
self.y_agg_vals = self.current_agg
        # can this be made smarter without the long if/else cascade above?
if ((self.current_agg in ['freq', 'cbase', 'x_sum', 'summary', 'calc'] or
self._res_is_stat()) and not self.type == 'array'):
if self.y == '@' or self.x == '@':
self.y_agg_vals = '@'
df = pd.DataFrame(self.result)
idx, cols = self._make_multiindex()
df.index = idx
df.columns = cols
self.result = df if not self.x == '@' else df.T
if self.type == 'nested':
self._format_nested_axis()
return self
def _make_multiindex(self):
x_grps = self.x_agg_vals
y_grps = self.y_agg_vals
if not isinstance(x_grps, list):
x_grps = [x_grps]
if not isinstance(y_grps, list):
y_grps = [y_grps]
if not x_grps: x_grps = [None]
if not y_grps: y_grps = [None]
if self._has_x_margin:
x_grps = ['All'] + x_grps
if self._has_y_margin:
y_grps = ['All'] + y_grps
if self.type == 'array':
x_unit = y_unit = self.x
x_names = ['Question', 'Values']
y_names = ['Array', 'Questions']
else:
x_unit = self.x if not self.x == '@' else self.y
y_unit = self.y if not self.y == '@' else self.x
x_names = y_names = ['Question', 'Values']
x = [x_unit, x_grps]
y = [y_unit, y_grps]
index = pd.MultiIndex.from_product(x, names=x_names)
columns = pd.MultiIndex.from_product(y, names=y_names)
return index, columns
def _format_nested_axis(self):
nest_mi = self._make_nest_multiindex()
if not len(self.result.columns) > len(nest_mi.values):
self.result.columns = nest_mi
else:
total_mi_values = []
for var in self.nest_def['variables']:
total_mi_values += [var, -1]
total_mi = pd.MultiIndex.from_product(total_mi_values,
names=nest_mi.names)
full_nest_mi = nest_mi.union(total_mi)
for lvl, c in zip(list(range(1, len(full_nest_mi)+1, 2)),
self.nest_def['level_codes']):
full_nest_mi.set_levels(['All'] + c, level=lvl, inplace=True)
self.result.columns = full_nest_mi
return None
def _make_nest_multiindex(self):
values = []
names = ['Question', 'Values'] * (self.nest_def['levels'])
for lvl_var, lvl_c in zip(self.nest_def['variables'],
self.nest_def['level_codes']):
values.append(lvl_var)
values.append(lvl_c)
mi = pd.MultiIndex.from_product(values, names=names)
return mi
def normalize(self, on='y'):
"""
Convert a raw cell count result to its percentage representation.
Parameters
----------
on : {'y', 'x'}, default 'y'
Defines the base to normalize the result on. ``'y'`` will
produce column percentages, ``'x'`` will produce row
percentages.
Returns
-------
self
            Updates a count-based aggregation in the ``result`` property.
"""
if self.x == '@':
on = 'y' if on == 'x' else 'x'
if on == 'y':
if self._has_y_margin or self.y == '@' or self.x == '@':
base = self.cbase
else:
if self._get_type() == 'array':
base = self.cbase
else:
base = self.cbase[:, 1:]
else:
if self._has_x_margin:
base = self.rbase
else:
base = self.rbase[1:, :]
if isinstance(self.result, pd.DataFrame):
if self.x == '@':
self.result = self.result.T
if on == 'y':
base = np.repeat(base, self.result.shape[0], axis=0)
else:
base = np.repeat(base, self.result.shape[1], axis=1)
self.result = self.result / base * 100
if self.x == '@':
self.result = self.result.T
return self
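    # Hypothetical usage sketch (added for illustration, not part of the
    # original module): chained onto a counts aggregation, normalize() turns
    # cell counts into column or row percentages:
    #
    #     q.count().normalize()           # column % (divides by cbase)
    #     q.count().normalize(on='x')     # row % (divides by rbase)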
def rebase(self, reference, on='counts', overwrite_margins=True):
"""
"""
val_err = 'No frequency aggregation to rebase.'
if self.result is None:
raise ValueError(val_err)
elif self.current_agg != 'freq':
raise ValueError(val_err)
is_df = self._force_to_nparray()
has_margin = self._attach_margins()
ref = self.swap(var=reference, inplace=False)
if self._sects_identical(self.xdef, ref.xdef):
pass
elif self._sects_different_order(self.xdef, ref.xdef):
ref.xdef = self.xdef
ref._x_indexers = ref._get_x_indexers()
ref.matrix = ref.matrix[:, ref._x_indexers + [0]]
elif self._sect_is_subset(self.xdef, ref.xdef):
ref.xdef = [code for code in ref.xdef if code in self.xdef]
ref._x_indexers = ref._sort_indexer_as_codes(ref._x_indexers,
self.xdef)
ref.matrix = ref.matrix[:, [0] + ref._x_indexers]
else:
            idx_err = 'Axis definition is not a subset of rebase reference.'
raise IndexError(idx_err)
ref_freq = ref.count(as_df=False)
self.result = (self.result/ref_freq.result) * 100
if overwrite_margins:
self.rbase = ref_freq.rbase
self.cbase = ref_freq.cbase
self._organize_margins(has_margin)
if is_df: self.to_df()
return self
@staticmethod
def _sects_identical(axdef1, axdef2):
return axdef1 == axdef2
@staticmethod
def _sects_different_order(axdef1, axdef2):
if not len(axdef1) == len(axdef2):
return False
else:
            # a generator expression is always truthy; compare the code sets
            # to detect identical codes in a different order
            return set(axdef1) == set(axdef2)
@staticmethod
def _sect_is_subset(axdef1, axdef2):
        return len(set(axdef1).intersection(set(axdef2))) > 0
class Test(object):
"""
    The Quantipy Test object is defined by a Link and the view name notation
string of a counts or means view. All auxiliary figures needed to arrive
at the test results are computed inside the instance of the object.
"""
def __init__(self, link, view_name_notation, test_total=False):
super(Test, self).__init__()
# Infer whether a mean or proportion test is being performed
view = link[view_name_notation]
if view.meta()['agg']['method'] == 'descriptives':
self.metric = 'means'
else:
self.metric = 'proportions'
self.invalid = None
self.no_pairs = None
self.no_diffs = None
self.parameters = None
self.test_total = test_total
self.mimic = None
self.level = None
# Calculate the required baseline measures for the test using the
# Quantity instance
self.Quantity = qp.Quantity(link, view.weights(), use_meta=True,
base_all=self.test_total)
self._set_baseline_aggregates(view)
# Set information about the incoming aggregation
# to be able to route correctly through the algorithms
# and re-construct a Quantipy-indexed pd.DataFrame
self.is_weighted = view.meta()['agg']['is_weighted']
self.has_calc = view.has_calc()
self.x = view.meta()['x']['name']
self.xdef = view.dataframe.index.get_level_values(1).tolist()
self.y = view.meta()['y']['name']
self.ydef = view.dataframe.columns.get_level_values(1).tolist()
columns_to_pair = ['@'] + self.ydef if self.test_total else self.ydef
self.ypairs = list(combinations(columns_to_pair, 2))
self.y_is_multi = view.meta()['y']['is_multi']
self.multiindex = (view.dataframe.index, view.dataframe.columns)
def __repr__(self):
return ('%s, total included: %s, test metric: %s, parameters: %s, '
'mimicked: %s, level: %s ')\
% (Test, self.test_total, self.metric, self.parameters,
self.mimic, self.level)
def _set_baseline_aggregates(self, view):
"""
Derive or recompute the basic values required by the ``Test`` instance.
"""
grps, exp, compl, calc, exclude, rescale = view.get_edit_params()
if exclude is not None:
self.Quantity.exclude(exclude)
if self.metric == 'proportions' and self.test_total and view._has_code_expr():
self.Quantity.group(grps, expand=exp, complete=compl)
if self.metric == 'means':
aggs = self.Quantity._dispersion(_return_mean=True,
_return_base=True)
self.sd, self.values, self.cbases = aggs[0], aggs[1], aggs[2]
if not self.test_total:
self.sd = self.sd[:, 1:]
self.values = self.values[:, 1:]
self.cbases = self.cbases[:, 1:]
elif self.metric == 'proportions':
if not self.test_total:
self.values = view.dataframe.values.copy()
self.cbases = view.cbases[:, 1:]
self.rbases = view.rbases[1:, :]
self.tbase = view.cbases[0, 0]
else:
agg = self.Quantity.count(margin=True, as_df=False)
if calc is not None:
calc_only = view._kwargs.get('calc_only', False)
self.Quantity.calc(calc, axis='x', result_only=calc_only)
self.values = agg.result[1:, :]
self.cbases = agg.cbase
self.rbases = agg.rbase[1:, :]
self.tbase = agg.cbase[0, 0]
def set_params(self, test_total=False, level='mid', mimic='Dim', testtype='pooled',
use_ebase=True, ovlp_correc=True, cwi_filter=False,
flag_bases=None):
"""
Sets the test algorithm parameters and defines the type of test.
This method sets the test's global parameters and derives the
necessary measures for the computation of the test statistic.
The default values correspond to the SPSS Dimensions Column Tests
algorithms that control for bias introduced by weighting and
overlapping samples in the column pairs of multi-coded questions.
.. note:: The Dimensions implementation uses variance pooling.
Parameters
----------
test_total : bool, default False
If set to True, the test algorithms will also include an existent
total (@-) version of the original link and test against the
            unconditional data distribution.
level : str or float, default 'mid'
            The level of significance, given either as 'low' = 0.1,
            'mid' = 0.05, 'high' = 0.01 or as a specific float, e.g. 0.15.
mimic : {'askia', 'Dim'} default='Dim'
Will instruct the mimicking of a software specific test.
testtype : str, default 'pooled'
Global definition of the tests.
use_ebase : bool, default True
If True, will use the effective sample sizes instead of the
            simple weighted ones when testing a weighted aggregation.
ovlp_correc : bool, default True
If True, will consider and correct for respondent overlap when
testing between multi-coded column pairs.
cwi_filter : bool, default False
If True, will check an incoming count aggregation for cells that
            fall below a threshold comparison aggregation that assumes counts
to be independent.
flag_bases : list of two int, default None
If provided, the output dataframe will replace results that have
been calculated on (eff.) bases below the first int with ``'**'``
and mark results in columns with bases below the second int with
``'*'``
Returns
-------
self
"""
# Check if the aggregation is non-empty
# and that there are >1 populated columns
if np.nansum(self.values) == 0 or len(self.ydef) == 1:
self.invalid = True
if np.nansum(self.values) == 0:
self.no_diffs = True
if len(self.ydef) == 1:
self.no_pairs = True
self.mimic = mimic
self.comparevalue, self.level = self._convert_level(level)
else:
# Set global test algorithm parameters
self.invalid = False
self.no_diffs = False
self.no_pairs = False
valid_mimics = ['Dim', 'askia']
if mimic not in valid_mimics:
raise ValueError('Failed to mimic: "%s". Select from: %s\n'
% (mimic, valid_mimics))
else:
self.mimic = mimic
if self.mimic == 'askia':
self.parameters = {'testtype': 'unpooled',
'use_ebase': False,
'ovlp_correc': False,
'cwi_filter': True,
'base_flags': None}
self.test_total = False
elif self.mimic == 'Dim':
self.parameters = {'testtype': 'pooled',
'use_ebase': True,
'ovlp_correc': True,
'cwi_filter': False,
'base_flags': flag_bases}
self.level = level
self.comparevalue, self.level = self._convert_level(level)
# Get value differences between column pairings
if self.metric == 'means':
self.valdiffs = np.array(
[m1 - m2 for m1, m2 in combinations(self.values[0], 2)])
if self.metric == 'proportions':
# special to askia testing: counts-when-independent filtering
if cwi_filter:
self.values = self._cwi()
props = (self.values / self.cbases).T
self.valdiffs = np.array([p1 - p2 for p1, p2
in combinations(props, 2)]).T
# Set test specific measures for Dimensions-like testing:
# [1] effective base usage
if use_ebase and self.is_weighted:
if not self.test_total:
self.ebases = self.Quantity._effective_n(axis='x', margin=False)
else:
self.ebases = self.Quantity._effective_n(axis='x', margin=True)
else:
self.ebases = self.cbases
# [2] overlap correction
if self.y_is_multi and self.parameters['ovlp_correc']:
self.overlap = self._overlap()
else:
self.overlap = np.zeros(self.valdiffs.shape)
# [3] base flags
if flag_bases:
self.flags = {'min': flag_bases[0],
'small': flag_bases[1]}
self.flags['flagged_bases'] = self._get_base_flags()
else:
self.flags = None
return self
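    # Hypothetical usage sketch (added for illustration, not part of the
    # original module): assuming `link` is a qp.Link that already stores a
    # counts view under the (placeholder) view notation string `view_key`,
    # a Dimensions-like column proportions test at the 0.05 level could be
    # run as
    #
    #     test = Test(link, view_key, test_total=False)
    #     sigs = test.set_params(level='mid', mimic='Dim').run()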
# -------------------------------------------------
# Main algorithm methods to compute test statistics
# -------------------------------------------------
def run(self):
"""
Performs the testing algorithm and creates an output pd.DataFrame.
The output is indexed according to Quantipy's Questions->Values
convention. Significant results between columns are presented as
lists of integer y-axis codes where the column with the higher value
        is holding the codes of the columns with the lower values. NaN
        indicates that a cell does not hold any sig. higher values
compared to the others.
"""
if not self.invalid:
sigs = self.get_sig()
return self._output(sigs)
else:
return self._empty_output()
def get_sig(self):
"""
TODO: implement returning tstats only.
"""
stat = self.get_statistic()
stat = self._convert_statistic(stat)
if self.metric == 'means':
diffs = | pd.DataFrame(self.valdiffs, index=self.ypairs, columns=self.xdef) | pandas.DataFrame |
import folium
import pandas as pd
#helper method to set up any input dataframe into dictionaries that can be fed into ORS (OpenRouteService) isochrone methods and folium maps
def dictSetup(dataframe):
station_dict = dataframe.to_dict(orient='index')
for name, station in station_dict.items():
station['locations'] = [station['Longitude'],station['Latitude']]
return station_dict
#input a Folium Map and Stations dictionary containing ISO data. this will draw the ISO lines on the folium map object
def isoVisualizer(maps,stations, map_icon = ""):
style_function = lambda x: {'color': '#4ef500' if x['properties']['value']<400 else ('#2100f5' if x['properties']['value']<700.0 else '#f50000'),
'fillOpacity' : 0.35 if x['properties']['value']<400 else (0.25 if 400.0<x['properties']['value']<700.0 else 0.05),
'weight':2,
'fillColor' :'#4ef500' if x['properties']['value']<400 else ('#2100f5' if 400.0<x['properties']['value']<700.0 else '#f50000')}
#('#6234eb' if x['properties']['value']==600.0 else '#6234eb')
for name, station in stations.items():
station_iso_temp = station['iso']
if type(station_iso_temp) == str:
station_iso_temp = station_iso_temp.replace("'", '"')
folium.features.GeoJson(station_iso_temp,style_function = style_function).add_to(maps) # Add GeoJson to map
if map_icon!="":
folium.map.Marker(list(reversed(station['locations'])), # reverse coords due to weird folium lat/lon syntax
icon=folium.Icon(color='lightgray',
icon_color='#cc0000',
icon=map_icon,
prefix='fa',
),
popup=station['Name'],
).add_to(maps) # Add apartment locations to map
print("Done!")
#Perform isochrone request and generates a new item in the stations dictionary containing isochrone data for that station.
#this will save the isochrones requested from Open Route Service in dictionaries that we created from dictSetup()
def isoGeoJsonRetriever(parameters,stations,client):
for name, station in stations.items():
print("Retrieving Isochrone of {} station".format(station['Name']))
parameters['locations'] = [station['locations']]
station['iso'] = client.isochrones(**parameters)
print("Success")
return
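#Hypothetical usage sketch (added for illustration; the parameter names below
#follow the openrouteservice client and are an assumption, so adjust them to
#your client version and API key):
#
#   import openrouteservice as ors
#   client = ors.Client(key='YOUR_API_KEY')        # placeholder key
#   params_iso = {'profile': 'foot-walking',       # walking isochrones
#                 'range': [400, 700, 900]}        # travel time in seconds
#   isoGeoJsonRetriever(params_iso, stations, client)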
#helper method to create new dictionary that is a subset of the larger list of dictionaries
#used if you want to separate stations by station lines into smaller dictionaries
def stationSubset(stations,station_list):
return { your_key: stations[your_key] for your_key in station_list }
#method that use input dataframe coordinates to make API calls to ORS to retrieve isochrones for train stations in the dataframe. Will return isochrone maps of a line and dictionary of stations
def toMap(data,line,params_iso,client):
# Set up folium map
if not line in data.values:
print('{} is not in data frame'.format(line))
temp = data['Route Name'].unique()
print('Choose from the following: ')
print(temp)
return
if line != None:
data = data[data['Route Name']==line]
starting_location = (data['Latitude'].iloc[0],data['Longitude'].iloc[0])
mapped = folium.Map(tiles='OpenStreetMap', location=starting_location, zoom_start=11)
stations = dictSetup(data[data['Route Name']==line])
isoGeoJsonRetriever(params_iso,stations,client)
isoVisualizer(mapped,stations)
return mapped,stations
#Will store full isochrone data in a column. A bit messy, but a cleaner alternative wasn't obvious
def dictToDataFrame(maps,dataframe):
iso_df = pd.DataFrame(columns= list(pd.DataFrame.from_dict(maps[0][1]).T))
for i in range(len(maps)):
temp = | pd.DataFrame.from_dict(maps[i][1]) | pandas.DataFrame.from_dict |
#! /usr/bin/python
import datetime
import json
import os
import pandas
import urllib.request
import time
#define constants
workingDir = os.getcwd()
print(workingDir)
stationID ='114'
yesterdayDate = (datetime.date.today() - datetime.timedelta(1))
todayDate = datetime.date.today()
yesterdayYear = yesterdayDate.year
urlBase = "https://climate.weather.gc.ca/climate_data/bulk_data_e.html?format=csv&stationID="
#&Year=${year}&Month=${month}&Day=14&timeframe=2&submit= Download+Data
#read in the current weather data
weatherDataFile = 'weatherData'+stationID+'.csv'
if os.path.isfile(weatherDataFile)==True:
weatherData = pandas.read_csv(weatherDataFile)
weatherData["Date"] = | pandas.to_datetime(weatherData["Date"]) | pandas.to_datetime |
from collections import Counter
from os import getenv
from pathlib import Path
from matplotlib.pyplot import savefig
from pandas import DataFrame
from . import database
from ..crawler.models import Article
current_path = Path(__file__).parent.resolve()
def test_rank():
# The test is not suitable for CI
if getenv("PLATFORM") == "ci":
return
articles_collection = database.get_collection("articles")
articles = [Article.parse_obj(i) for i in articles_collection.find({})]
all_words_unpacked = [j for i in articles for j in i.words]
count = Counter(all_words_unpacked)
common = count.most_common()
df = | DataFrame(common) | pandas.DataFrame |
# Implementation of random stuff
import json
import torch
import pandas as pd
import pickle
from torch_geometric.data import Data
from pathlib import Path
from itertools import repeat
from collections import OrderedDict
class MetricTracker:
"""
Class implementation for tracking all the metrics.
"""
def __init__(self, *keys, writer=None):
"""
Method to initialize an object of type MetricTracker.
Parameters
----------
self : MetricTracker
Instance of the class
*keys : tuple
Multiple number of non-keyword arguments
writer : SummaryWriter
Writer to log data for consumption and visualization by TensorBoard
Returns
-------
        self : MetricTracker
            Initialized object of class MetricTracker
"""
self.writer = writer
self.data = | pd.DataFrame(index=keys, columns=['total', 'counts', 'average']) | pandas.DataFrame |
import xgboost as xgb
from sklearn.impute import SimpleImputer
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
from src.utils.io import load, save
from src.visualization.visualize import *
def get_X_y(data):
X = data[:, 1:]
y = data[:, 1]
return X, y
def get_sliding_windows(X, y, lookback, delay, min_index, max_index):
f = False
for i in range(min_index, max_index):
if not f:
samples = X[i:i + lookback].flatten()
samples = samples.reshape((1, samples.shape[0]))
targets = y[i + lookback:i + lookback + delay]
f = True
else:
temp = X[i:i + lookback].flatten()
samples = np.r_[samples, temp.reshape((1, temp.shape[0]))]
targets = np.r_[targets, y[i + lookback:i + lookback + delay]]
return samples, targets
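# Illustration (added for clarity, not part of the original script): with
# lookback=3 and delay=1, each sample row is the flattened block of the three
# preceding time steps, i.e. an X of shape (n, f) yields samples of shape
# (max_index - min_index, 3 * f) and one target value per row.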
def get_random_forrest_model(samples, targets):
regressor = RandomForestRegressor(criterion='mae', n_estimators=50, verbose=True, n_jobs=-1)
model = regressor.fit(samples, targets)
return regressor
def get_xgb_model(samples, targets, samples_eval, targets_eval):
regressor = xgb.XGBRegressor()
model = regressor.fit(samples, targets, early_stopping_rounds=10, eval_metric="mae",
eval_set=[(samples, targets), (samples_eval, targets_eval)], verbose=True)
return regressor
def split_data(data, lookback):
values = data.to_numpy()
X, y = get_X_y(values)
imp_mean = SimpleImputer(missing_values=np.nan, strategy='median')
imp_mean.fit(X)
X = imp_mean.transform(X)
# X = pd.DataFrame(X, columns=data.columns.to_list())
# y = pd.DataFrame(y, columns=['Close'])
cut = 1
if cut:
# cutoff first 700 data points
X = X[700:]
y = y[700:]
train_size = int(0.8 * X.shape[0])
# X = data.iloc[:, 1:]
X = X[:, list(range(5,13)) + list(range(14, X.shape[1]))]
# samples = X[:train_size]
# samples_eval = X[train_size:]
#
# targets = y[:train_size]
# targets_eval = y[train_size:]
samples, targets = get_sliding_windows(X, y, lookback, 1, 0, train_size)
samples_eval, targets_eval = get_sliding_windows(X, y, lookback, 1, train_size, X.shape[0] - lookback)
return (samples, targets), (samples_eval, targets_eval)
def build_model(data, lookback):
(samples, targets), (samples_eval, targets_eval) = split_data(data, lookback)
# Select model RandomForrest or XGBooster
# model = get_random_forrest_model(samples, targets)
model = get_xgb_model(samples, targets, samples_eval, targets_eval)
predictions = model.predict(samples_eval)
mse = mean_squared_error(targets_eval, predictions)
mae = mean_absolute_error(targets_eval, predictions)
print("MSE: ", mse)
print("MAE: ", mae)
# plot_targets_vs_predictions(targets_eval, predictions)
return model
def create_feature_labels(features, lookback):
feature_labels = []
features = features.columns[1:]
for i in range(lookback, 0, -1):
feature_labels += [feature + '_' + str(i) for feature in features]
print('n_features: ', len(feature_labels))
feature_labels = np.asarray(feature_labels)
return feature_labels
def calculate_feature_importances(features, lookback, model):
feature_labels = create_feature_labels(features, lookback)
importances = model.feature_importances_.astype('float32')
indices = np.argsort(-importances)
values = np.c_[feature_labels[indices].reshape(feature_labels.shape[0], 1),
importances[indices].reshape(feature_labels.shape[0], 1)]
feature_importances = | pd.DataFrame(data=values, columns=['feature_labels', 'feature_importance']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 17:37:51 2020
@author: sawleen
"""
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
os.chdir('/Users/sawleen/Documents/Leen/Python/stock_analysis')
import data.get_yf_data as get_yf_data #Configured to the same root folder where display_webpg.py resides
import data.get_sgi_data as get_sgi_data #Configured to the same root folder where display_webpg.py resides
import data.get_morningstar_data as get_ms_data
import time
import math
class Update():
#### Get sector summaries (generate main list)
def prep_sector_summaries(self, stocks_map, stock_sectors, new_sectors, new_stocks=None):
summary_all_df = pd.DataFrame([]) # To track for all sectors
start_time = time.time()
# New entries detected
if new_sectors != 'All':
summary_all_df = pd.read_csv('data/sector_summaries/All.csv', index_col=None)
# Get all health metrics first
# Health metrics require selenium, which is prone to disconnections
health_metrics_dict_all = self.get_all_health_metrics(new_stocks)
for sector_to_update in new_sectors:
print('Sector to update: {}'.format(sector_to_update))
summary_df = pd.read_csv('data/sector_summaries/{}.csv'.format(sector_to_update), index_col=None)
for symbol in new_stocks:
# Update CSV for indiv sector
current_sector = stocks_map.loc[stocks_map['SGX_Symbol'] == symbol, ['Sector']].values[0][0]
print('Current stock sector: {}'.format(current_sector))
if current_sector == sector_to_update:
stocks_map_filtered = stocks_map.loc[stocks_map['SGX_Symbol'] == symbol, stocks_map.columns]
[summary_df, summary_all_df] = self.get_summary_df(sector_to_update, stocks_map_filtered, health_metrics_dict_all, summary_df, summary_all_df)
# Sector summary
summary_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_df.to_csv('data/sector_summaries/{}.csv'.format(sector_to_update), index=False)
summary_all_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_all_df.to_csv('data/sector_summaries/All.csv',index=False)
# No new entries but update for ALL sectors
else:
#expected_runtime = int(len(stocks_map)/60*15) # expected time to print to screen
print('Updating summary for all sectors...')
#print('Please hold on for about {}min...'.format(expected_runtime))
summary_all_df = pd.DataFrame([])
# Get all health metrics first
# Health metrics require selenium, which is prone to disconnections
symbols=stocks_map['SGX_Symbol']
health_metrics_dict_all = self.get_all_health_metrics(symbols)
for sector in stock_sectors:
summary_df = pd.DataFrame([])
if sector!= 'All':
stocks_map_filtered = stocks_map.loc[stocks_map['Sector'] == sector, stocks_map.columns]
[summary_df, summary_all_df] = self.get_summary_df(sector, stocks_map_filtered, health_metrics_dict_all, summary_df, summary_all_df)
# Sector summary
summary_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_df.to_csv('data/sector_summaries/{}.csv'.format(sector), index=False)
# All stocks summary
print('Sorting sector summary for ALL stocks...')
summary_all_df.sort_values(['Strategy','Tally','Tally(%)','Dividend (fwd)','PB Ratio'],ascending=[True,False,False,False,True],inplace=True)
summary_all_df.to_csv('data/sector_summaries/All.csv', index=False)
total_time = round((time.time() - start_time)/60,2)
print('Total time taken: {}'.format(total_time))
#### End of prep_sector_summaries
def get_summary_df(self, sector_to_update, stocks_map_filtered, health_metrics_dict_all, summary_df, summary_all_df):
print('Prepping sector summary for {}...'.format(sector_to_update))
for sgx_symbol in stocks_map_filtered['SGX_Symbol']:
print('{}...'.format(sgx_symbol))
yf_data = get_yf_data.Data(sgx_symbol)
industry = yf_data.get_industry()
stats = yf_data.get_basic_stats()
[inc_yoy_avg_growth_df, inc_yrly_growth_df] = yf_data.process_inc_statement()
dividends_df = yf_data.get_dividends()
try:
div_fwd = dividends_df.loc[dividends_df['Dividend Type']=='Forward',['Values']].values[0][0]
except:
print('! Warning: No forward dividend data fetched for {}'.format(sgx_symbol))
div_fwd = math.nan
short_name = yf_data.get_name_short()
disp_name = yf_data.get_name_disp()
if '.SI' in sgx_symbol and type(short_name)==str:
sgi_data = get_sgi_data.Data(sgx_symbol, short_name)
url_tprice = sgi_data.get_sginvestor_url(sgx_symbol, short_name, industry)
#print(url_tprice)
soup_tprice = sgi_data.get_soup_tprice(url_tprice)
tpcalls = sgi_data.get_tpcalls(soup_tprice)
tpcalls_df = sgi_data.get_tpcalls_df(tpcalls)
strategies_summary = sgi_data.get_strategies_summary(tpcalls_df)
else: # create empty dataframe
strategies_summary = pd.DataFrame(index=[0],columns=['Strategy','Tally(%)','Tally'])
health_metrics = health_metrics_dict_all[sgx_symbol]
info={'Name':disp_name,
'Symbol':sgx_symbol,
'Market Cap (bil)':stats['Market Cap (bil)'],
'PB Ratio': stats['PB Ratio'],
'PE Ratio': stats['PE Ratio'],
'Dividend Payout Ratio': stats['Dividend Payout Ratio'],
'Income Growth (Avg YoY)':inc_yoy_avg_growth_df['Income'].values[0],
'ROE': stats['% Return on Equity'],
'Dividend (fwd)': div_fwd,
'Strategy': strategies_summary.at[0,'Strategy'],
'Tally(%)': strategies_summary.at[0,'Tally(%)'],
'Tally': strategies_summary.at[0,'Tally'],
'Price/Cash Flow':health_metrics['Price/Cash Flow'],
'Debt/Equity':health_metrics['Debt/Equity'],
'Interest Coverage':health_metrics['Interest Coverage']}
# Stock summary
info_df = pd.DataFrame.from_dict(info, orient='columns')
# Sector summary
if summary_df.empty:
summary_df = info_df
else:
summary_df = pd.concat([summary_df, info_df])
# All sector summary
if summary_all_df.empty:
summary_all_df = info_df
else:
summary_all_df = pd.concat([summary_all_df, info_df])
return [summary_df, summary_all_df]
def get_all_health_metrics(self, symbols):
print('Do you want to read from pre-generated health metrics?')
user_pref = input()
print('... Getting health metrics...')
# Read from stored data if user wants to save time
if 'y' in user_pref.lower():
print('...from CSV...')
health_metrics_all_df = pd.read_csv('data/health_metrics_all_df.csv',index_col='symbol')
health_metrics_dict_all = health_metrics_all_df.to_dict('index')
else:
# Initialize driver
driver_options = Options()
driver_options.add_argument("--headless") #for chromedriver to work remotely
chromedriver_path = '/usr/local/bin/chromedriver'
driver = webdriver.Chrome(chromedriver_path,options=driver_options)
# Get health metrics
health_metrics_dict_all={}
for sgx_symbol in symbols:
print('...{}...'.format(sgx_symbol))
health_metrics_dict = get_ms_data.Data().get_health_metrics_dict(sgx_symbol, driver)
health_metrics_dict_all[sgx_symbol] = health_metrics_dict
# Close driver
driver.quit()
print('... Metrics stored...')
print(health_metrics_dict_all)
# Option to save to CSV if user wants
print('... Do you want to save to local disk?')
save_health_metrics = input()
if 'y' in save_health_metrics.lower():
#print(health_metrics_dict_all)
# Write to CSV in case want to refer in future
health_metrics_dict_df = pd.DataFrame.from_dict(health_metrics_dict_all).T
health_metrics_dict_df.index.rename('symbol',inplace=True)
#health_metrics_dict_df.reset_index(inplace=True)
saved_health_metrics = pd.read_csv('data/health_metrics_all_df.csv', index_col=['symbol'])
for sgx_symbol in symbols:
print(sgx_symbol)
# Add to saved list if not already inside
if not sgx_symbol in saved_health_metrics.index:
health_metric_symbol = health_metrics_dict_df[health_metrics_dict_df.index==sgx_symbol]
saved_health_metrics = | pd.concat([saved_health_metrics, health_metric_symbol]) | pandas.concat |
import pandas as pd
import math
from numpy import nanmin,nanmax
#maximum number of records in a parquet file (except the index file)
max_rows = 500000
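# Illustrative only (not part of the original script): one way to honour max_rows when
# writing, assuming an `addresses` DataFrame has already been assembled for a state:
# for i, start in enumerate(range(0, len(addresses), max_rows)):
#     addresses.iloc[start:start + max_rows].to_parquet('addresses_{}.parquet'.format(i))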
states = ["ACT", "NSW", "NT", "OT", "QLD", "SA", "TAS", "VIC", "WA"]
#states = ["ACT", "WA"]
#initiate the index file
index_file = | pd.DataFrame(columns=['IDX','STREET_NAME','STREET_TYPE_CODE','LOCALITY_NAME','STATE','POSTCODE','FILE_NAME','ADDRESS_COUNT','MIN_STREET_NUMBER','MAX_STREET_NUMBER']) | pandas.DataFrame |
import pydoc
import pandas as pd
import os
import random
def read_excel():
df = pd.read_excel('/Users/ls/Downloads/babycare11-1.xlsx')
data = df.head(2)
print(str(data))
# print(df.head(2))
def merge_excel():
dfs = []
dir = '/Users/ls/babycare/'
des = '/Users/ls/babycare/babycare-stats-7-3.xlsx'
for root, dirs, files in os.walk(dir):
for file in files:
file_name = os.path.join(root, file)
# print(root,"==",file)
if 'babycare-stats-7-' in file_name:
df = pd.read_excel(file_name)
dfs.append(df)
print(len(files))
all_data = | pd.concat(dfs) | pandas.concat |
import streamlit as st
import datetime
import pytz
from datetime import date
from utils.metrics import log_runtime
import pandas as pd
import timeit
short_title = "iterrows() and itertuples()"
long_title = "iterrows() and itertuples()"
key = 6
content_date = datetime.datetime(2021, 10, 5).astimezone(pytz.timezone("US/Eastern"))
assets_dir = "./assets/" + str(key) + '/'
@log_runtime
def render(location: st):
location.markdown(f"## [{long_title}](/?content={key})")
location.write(f"*{content_date.strftime('%m.%d.%Y')}*")
location.write(f"Pandas `iterrows()` and `itertuples()` provide two different ways to `iter`ate over rows in a DataFrame. "
"If you're working with a data set that contains columns with different data "
"types, use caution since `iterrows()` and `itertuples()` may not always return the data you expect. Here's an example based on the "
"Pandas documentation."
)
location.write(f"### iterrows()")
location.write(f"`iterrows()` returns a Pandas `Series`. "
)
# this works
# ser = pd.Series([1,2, 3.0])
# location.write(ser)
# ser = pd.Series(['1','2','3.0'])
# location.write(ser)
# this fails - seems like a streamlit bug
#ser = pd.Series(['1', 2, 3])
#location.write(ser)
# this also fails
# ser = pd.Series([1, '2', 3])
# location.write(ser)
df = pd.DataFrame([[1, 2.0, 3]], columns=['int', 'float', 'cat'])
df['cat'] = df['cat'].astype('category')
location.code(
"""
df = pd.DataFrame([[1, 2.0, 3]], columns=['int', 'float', 'cat']) # Create a dataframe with several different data types
df['cat'] = df['cat'].astype('category') # make 'cat' a categorical type
"""
)
location.write(df)
location.write("You might expect to get back a `Series` containing an `int`, `float` and `category`. But, in this case, `iterrows()` returns a `Series` of `float64`.")
location.code(
"""
_, row = next(df.iterrows())
row
"""
)
i, row = next(df.iterrows())
location.write(row)
location.code(
"""
row['cat'].dtype
"""
)
location.write(row['cat'].dtype)
location.write(f"### itertuples()")
location.write(f"`itertuples()` returns a `NamedTuple`."
)
df = pd.DataFrame([[1, 2.0, 3 ,'str', '2021-01-01']], columns=['int', 'float', 'cat', 'str', 'date'])
df['cat'] = df['cat'].astype('category')
df['date'] = | pd.to_datetime(df['date'], format='%Y-%m-%d') | pandas.to_datetime |
import pandas as pd
from output.helpers import *
from datetime import datetime
import emoji
import re
import string
import nltk
from nltk import ngrams, FreqDist
from nltk.sentiment import SentimentIntensityAnalyzer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_selection import SelectKBest
from sklearn import svm
from sklearn import naive_bayes as nb
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn import neighbors
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import numpy as np
from collections import Counter
import seaborn as sns
import matplotlib.pyplot as plt
class Columns:
ID = 'id'
TIME = 'time'
TEXT = 'text'
LIKES = 'likes'
REACTIONS = 'reactions'
_TS_ADD = '_ts'
YEAR = 'year' + _TS_ADD
MONTH = 'month' + _TS_ADD
DAY = 'weekday' + _TS_ADD
HOUR = 'hour' + _TS_ADD
POST_LEN = 'post_length'
DISTINCT = 'distinct_words'
AVG_WRD = 'avg_wrd_len'
MAX_WRD = 'max_wrd_len'
MIN_WRD = 'min_wrd_len'
NGRAMS = 'ngrams'
NUMBERS = 'numbers'
EX_MARKS = 'exclamation'
def clean(text):
def remove_emojis(txt):
return emoji.get_emoji_regexp().sub(r'EMJ', txt)
def remove_punctuation(txt):
return txt.translate(str.maketrans('', '', string.punctuation))
def remove_url(txt):
return re.sub(r'\(?http\S+\)?', 'URL', remove_emojis(txt))
def squeeze_spaces(txt):
return re.sub(r'\s{2,}', ' ', txt)
def trim(txt):
return txt.strip()
def remove_quotes(txt):
# return re.sub(r'[\"\']', '', txt)
return re.sub(r'\"', '', txt)
text = remove_emojis(text)
text = remove_url(text)
# text = remove_punctuation(text)
text = remove_quotes(text)
text = squeeze_spaces(text)
text = trim(text)
return text
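# Illustrative behaviour (approximate): clean('Check https://t.co/x 😀 "now"')
# would come back roughly as 'Check URL EMJ now' — the URL and emoji are replaced
# by placeholders and the double quotes are dropped.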
def k_most_common_ngrams(X, ng=2, k=20):
"""
Return most K common trigrams in the files in the input directory.
"""
top_k = FreqDist(ngrams('\n'.join(X).split(), ng))
return [' '.join(t[0]) for t in top_k.most_common(k)]
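# Illustrative usage: k_most_common_ngrams(["the cat sat", "the cat ran"], ng=2, k=1)
# would return ['the cat'], the single most frequent bigram across the documents.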
def k_most_common_char_ngrams(X, ng=2, k=20):
"""
Return most K common trigrams in the files in the input directory.
"""
top_k = FreqDist(ngrams('\n'.join(X), ng))
return [' '.join(t[0]) for t in top_k.most_common(k)]
def get_most_common_k(X, k=50):
counter = Counter(" ".join(X.apply(lambda s: re.sub(f'[{string.punctuation}]', '', s).strip())).split())
return sorted(counter, key=counter.get, reverse=True)[:k]
def get_best_k(X, y, k=20):
# Crete TF-IDF values based on the training data
vectorizer = TfidfVectorizer(use_idf=True)
# vectorizer = CountVectorizer()
tfidf_vals = vectorizer.fit_transform(X)
# create and fit selector
selector = SelectKBest(k=k)
selector.fit(tfidf_vals.toarray(), y)
words_idx = selector.get_support(indices=True)
# get the actual words
the_words = [k for k,v in vectorizer.vocabulary_.items() if v in words_idx]
return the_words
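# Illustrative usage (names are placeholders): get_best_k(train_texts, train_labels, k=20)
# returns the 20 vocabulary terms whose TF-IDF columns score highest under SelectKBest.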
def my_train_test_split(df, label_col_name, test_percentage, shuffle=False):
df2 = df.copy()
if shuffle:
df2 = df2.sample(frac=1).reset_index(drop=True)
data_cols = [c for c, col in enumerate(df2.columns) if col != label_col_name]
train_size = int(df2.shape[0] * (1 - test_percentage))
X_train = df2.iloc[:train_size, data_cols]
y_train = df2.iloc[:train_size][label_col_name]
X_test = df2.iloc[train_size:, data_cols].reset_index(drop=True)
y_test = df2.iloc[train_size:][label_col_name].reset_index(drop=True)
return X_train, X_test, y_train, y_test
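# Illustrative usage ('label' is a placeholder column name):
# X_train, X_test, y_train, y_test = my_train_test_split(df, 'label', test_percentage=0.2, shuffle=True)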
def classify(clf, X_train, y_train, X_test, y_test):
clf.fit(X_train, y_train)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
if hasattr(clf, 'feature_importances_'):
forest_importances = pd.Series(clf.feature_importances_, index=X_train.columns)
else:
forest_importances = None
return train_score, test_score, forest_importances
def run_classifiers(X_train, y_train, X_test, y_test):
# calculate baseline
print('Baseline (random chance):')
random_chance = y_train.value_counts().max() / len(y_train)
print(f'Train dataset: {random_chance*100:.2f}%')
random_chance = y_test.value_counts().max() / len(y_test)
print(f'Test dataset: {random_chance*100:.2f}%')
print('')
SVM = False
NB = False
DT = False
RF = True
KNN = False
# SVM
if SVM:
print('SVM')
train_score, test_score, _ = classify(svm.SVC(), X_train, y_train, X_test, y_test)
print(f'Training accuracy: {train_score}')
print(f'Test accuracy: {test_score}')
# Naive Bayes
if NB:
print('Naive Bayes')
train_score, test_score, _ = classify(nb.MultinomialNB(), X_train, y_train, X_test, y_test)
print(f'Training accuracy: {train_score}')
print(f'Test accuracy: {test_score}')
# Decision Tree
if DT:
print('Decision Tree')
train_score, test_score, ftr_impr = classify(tree.DecisionTreeClassifier(), X_train, y_train, X_test, y_test)
print(f'Training accuracy: {train_score}')
print(f'Test accuracy: {test_score}')
# Random Forest
if RF:
print('Random Forest')
# max_depth=10, min_samples_leaf=2 reduce overfitting very well (from 99% on training to 70%, without harming test)
train_score, test_score, ftr_impr = classify(RandomForestClassifier(max_depth=10, min_samples_leaf=2, random_state=SEED),
X_train, y_train, X_test, y_test)
print(f'Training accuracy: {train_score}')
print(f'Test accuracy: {test_score}')
print(f'Feature importances:\n{ftr_impr.sort_values(ascending=False)[:20]}')
# KNN
if KNN:
print('KNN')
train_score, test_score, _ = classify(neighbors.KNeighborsClassifier(), X_train, y_train, X_test, y_test)
print(f'Training accuracy: {train_score}')
print(f'Test accuracy: {test_score}')
if __name__ == '__main__':
READ_PREPROCESSED = True
    SAVE_TOKENIZED = True  # only applies when READ_PREPROCESSED is False
SEED = 42
input_path = Helpers.get_csv_path()
if READ_PREPROCESSED:
input_path = str(input_path).replace('.csv', ' preprocessed.csv')
print('Reading preprocessed csv...')
df = | pd.read_csv(input_path) | pandas.read_csv |
# general imports
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import cv2
import os
from PIL import Image
from pprint import pprint
import time
from tqdm import tqdm
# torch and torchvision
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision
import torchvision.transforms as transforms
# catalyst for training and metrics
from catalyst.utils import split_dataframe_train_test
from catalyst.dl.callbacks import AccuracyCallback, AUCCallback
from catalyst.dl import SupervisedRunner
# scikit-learn
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
import timm
from dataset import LeafDataset
from utils import get_transforms
from config import config
import warnings
warnings.simplefilter("ignore")
model = None
def set_global_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
    # deterministic cuDNN kernels make runs reproducible but can slow training down,
    # so this setting is mainly useful for inference/evaluation
    torch.backends.cudnn.deterministic = True
def load_timm_model(arch, num_classes, checkpoint_location, location='cpu'):
"""defines backbone architecture and updates final layer"""
model = timm.create_model(
model_name=arch, num_classes=num_classes, pretrained=False)
checkpoint = torch.load(checkpoint_location,
map_location={'cuda:0': location})
pprint(checkpoint['epoch_metrics'])
model.load_state_dict(checkpoint['model_state_dict'])
return model
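# Illustrative usage (architecture and checkpoint path are placeholders, not the values used in training):
# model = load_timm_model('resnet34', num_classes=4, checkpoint_location='logdir/checkpoints/best_full.pth')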
def run_evaluation(csv_path, img_root, img_transforms, device):
global model
# given model and valid dataset
# iterate over dataset and compute prediction
y_true = []
y_pred = []
y_logits = []
misses = {}
df = pd.read_csv(csv_path)
test_size = len(df)
print(f"Size: {test_size}")
model.eval()
for idx, row in tqdm(df.iterrows(), total=len(df)):
filename = row['image_id']
target = np.argmax(
row[['healthy', 'multiple_diseases', 'rust', 'scab']].values)
img = Image.open(os.path.join(
img_root, filename+'.jpg')).convert('RGB')
img = np.asarray(img)
augmented = img_transforms(image=img)
img_tensor = augmented['image']
img_tensor = img_tensor.unsqueeze(0,)
img_tensor = img_tensor.to(device)
with torch.no_grad():
pred = F.softmax(model(img_tensor)).squeeze().cpu()
probs = pred.numpy()
_, output = torch.topk(pred, 1)
output = output.numpy()[0]
if output != target:
misses[filename] = {
'y_true': target,
'y_pred': probs
}
y_true.append(target)
y_pred.append(output)
y_logits.append(probs)
return y_true, y_pred, y_logits, misses
def read_sample(root: str, filename: str):
img = cv2.imread(os.path.join(root, filename+'.jpg'))
return img
def plot_misses(df, img_root, result_filename):
fig, ax = plt.subplots(nrows=len(df), ncols=2, figsize=(10, 20))
for idx, row in tqdm(df.iterrows(), total=len(df)):
#row = df.iloc[idx]
filename = row['image_id']
print(filename)
true_label = row['y_true']
pred_label = np.argmax(row['y_pred'])
# label = np.argmax(df.ilo[['healthy', 'multiple_diseases', 'rust', 'scab']].values)
label_names = ['healthy', 'multiple_diseases', 'rust', 'scab']
img = read_sample(img_root, filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
ax[idx][0].imshow(img)
ax[idx][0].axis('off')
ax[idx][0].set_title(
f"{filename}: true: {label_names[true_label]} pred: {label_names[pred_label]}")
sns.barplot(x=label_names, y=row['y_pred'], ax=ax[idx][1])
ax[idx][1].set_title('Probs')
plt.savefig(result_filename)
return
def run_on_held_out(csv_path, img_root, img_transforms, device):
global model
# given model and valid dataset
# iterate over dataset and compute prediction
df = pd.read_csv(csv_path)
test_size = len(df)
print(f"Size: {test_size}")
y_pred = {}
model.eval()
for idx, row in tqdm(df.iterrows(), total=len(df)):
filename = row['image_id']
# target = np.argmax(row[['healthy', 'multiple_diseases', 'rust', 'scab']].values)
img = Image.open(os.path.join(
img_root, filename+'.jpg')).convert('RGB')
img = np.asarray(img)
augmented = img_transforms(image=img)
img_tensor = augmented['image']
img_tensor = img_tensor.unsqueeze(0,)
img_tensor = img_tensor.to(device)
# run prediction
with torch.no_grad():
pred = F.softmax(
model(img_tensor)).squeeze().cpu()
# _,output = torch.topk(pred,1)
output = pred.numpy()
result = {
'healthy': output[0],
'multiple_diseases': output[1],
'rust': output[2],
'scab': output[3]
}
# store results
y_pred[filename] = result
return y_pred
def main():
global model
# setup config
cfg = config()
cfg['device'] = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
# cfg['logdir'] += timestr
set_global_seed(cfg['random_state'])
pprint(cfg)
# load data
train_df = | pd.read_csv(cfg['train_csv_path']) | pandas.read_csv |
"""
This file is for playing around with song data from the MSD data set.
In particular, we are interesting in getting all of the data out in
an exportable manner.
We can't get all of the information from the summary file, we have to
open all files and extract the data to do this.
"""
import os
import pandas as pd
import glob
import hdf5_getters
from sklearn.ensemble import RandomForestClassifier
# change these as appropriate.
training_dataset = './datasets/relax+banger+classical+sleep+study_dataset_cleaned.csv'
msd_subset_path = './datasets/MillionSongSubset'
msd_subset_data_path = os.path.join(msd_subset_path, 'data')
msd_subset_addf_path = os.path.join(msd_subset_path, 'AdditionalFiles')
# Create a mapping of the getter functions available in the hdf5_getters
# library and the names we want to assign their return value to. This
# defines the schema we want to export
getter_func_names = list(filter(lambda x: x.startswith('get'), dir(hdf5_getters)))
getter_mapping = {x[4:]:x for x in getter_func_names}
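# e.g. a getter named 'get_artist_name' (if present in hdf5_getters) is exported under
# the key 'artist_name', so the schema mirrors the getter names without the 'get_' prefix.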
# functions
def main():
data, cnt = apply_to_all_tracks(msd_subset_data_path, get_song_attr)
print('Exported {} songs'.format(cnt))
def apply_to_all_tracks(basedir, func, ext='.h5'):
"""
Walk the directoy and apply a given function to a track file.
"""
cnt = 0
data = []
clf = get_rf_classifier(training_dataset)
for root, dirs, files in os.walk(basedir):
files = glob.glob(os.path.join(root, '*' + ext))
for f in files:
data.append(func(f, clf))
cnt += 1
print(cnt)
return (data, cnt)
def get_song_attr(file_name, clf):
"""
Apply all possible getters to a track file. this completely exports
all of the data for a given track file.
"""
f = hdf5_getters.open_h5_file_read(file_name)
data = {}
for attr_name, func_name in getter_mapping.items():
data[attr_name] = getattr(hdf5_getters, func_name)(f)
f.close()
data['isBanger'] = isBanger(data, clf)
return data
def get_rf_classifier(training_file):
data = | pd.read_csv(training_file) | pandas.read_csv |
import json
import os
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from numpy import array as nparray
import countryinfo
import pandas as pd
import datetime
import folium
import torch
import numpy as np
def compare_models():
"""
Output a table showing final train/val/test loss by model.
"""
files = os.listdir('results')
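    # NOTE: the listdir() result above is immediately replaced by this hard-coded
    # selection of result files, so only these four runs are averaged.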
files = ['results2021-03-04T16:05:09.json',
'results2021-03-04T16:10:42.json',
'results2021-03-04T16:11:19.json',
'results2021-03-04T16:11:28.json']
losses = {}
# Average final losses across several results files to get a more accurate average...
for file in files:
with open('results/' + file, 'r') as f:
data = json.load(f)
# For each model in the file...
for model, model_data in data['Models'].items():
# Setup keys for new models
if not model in losses:
losses[model] = {
'Train': [],
'Train Evaluation': [],
'Validation': [],
'Test': []
}
# Append the loss for each segment
for segment, loss in model_data['Loss by Epoch'][-1].items():
losses[model][segment].append(loss)
# Calculate the average loss per segment over all the files
for model, model_data in losses.items():
for segment, segment_losses in model_data.items():
losses[model][segment] = "%.3f" % (sum(segment_losses) / len(segment_losses))
# Display in a pyplot table
cellText = list(map(lambda x: list(x.values()), list(losses.values())))
rows = list(losses.keys())
columns = ['Train', 'Train Evaluation', 'Validation', 'Test']
table = plt.table(
cellText=cellText,
rowLabels=rows,
colLabels=columns,
loc='center'
)
table.scale(1, 1.5)
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.box(on=None)
plt.title("Comparison of Models on Week Ahead COVID Case Prediction")
plt.show()
# Output the table as a CSV file
models = list(losses.keys())
for i in range(len(cellText)):
cellText[i].insert(0, models[i])
df = pd.DataFrame(cellText)
df.columns = ["Model"] + columns
df.to_csv("reports/Loss By Epoch Report - " + datetime.datetime.now().isoformat().split(".")[0] + ".csv")
def draw_map(data):
url = (
"https://raw.githubusercontent.com/python-visualization/folium/master/examples/data"
)
state_geo = 'europe.json'
m = folium.Map(location=[48, -102], zoom_start=3, tiles='https://api.mapbox.com/styles/v1/sestinj/cko1roam616pw18pgzg1kv1yh/tiles/256/{z}/{x}/{y}@2x?access_token=<KEY>',
attr='Mapbox attribution')
folium.Choropleth(
geo_data=state_geo,
name="choropleth",
data=data,
columns=["Country", "Our Model"],
key_on="feature.properties.name",
fill_color="YlGnBu",
nan_fill_opacity=1.0,
nan_fill_color="#ffffff",
fill_opacity=1.0,
line_opacity=1.0,
legend_name="Fraction of Cases Missed",
).add_to(m)
folium.LayerControl().add_to(m)
m.save("map.html")
def loss_by_country():
"""
Output a table showing average loss by country for multiple models, along with population and total cases stats
"""
files = os.listdir('results')
files = ['results2021-05-21T11:16:52.json',
]
all_losses = {'train': {}, 'val': {}, 'test': {}}
for segment, losses in all_losses.items():
if not segment == 'val':
continue
# Aggregate across several specified files
for file in files:
with open('results/' + file, 'r') as f:
data = json.load(f)
# For each model...
for model, model_data in data['Models'].items():
# Create keys in dictionary
if not model in losses:
losses[model] = {}
for country in model_data['Loss by Country'][segment].keys():
losses[model][country] = []
# Append losses per country from this file
for country, country_losses in model_data['Loss by Country'][segment].items():
losses[model][country] += country_losses
# Calculate average over all losses per country
for model, model_data in losses.items():
for country, country_losses in model_data.items():
losses[model][country] = "%.3f" % (sum(country_losses) / len(country_losses))
# Get population from countryinfo library
losses["Population"] = {}
countries = list(model_data.keys())
for country in countries:
try:
losses["Population"][country] = countryinfo.CountryInfo(country).population()
except KeyError:
losses["Population"][country] = "?"
# Get total cases per country from original dataset CSV
losses["Total Cases"] = {}
df = pd.read_csv("df.csv")
df = dict(df.sum())
for country in countries:
if country + "_new_cases" in df:
losses["Total Cases"][country] = df[country + "_new_cases"]
else:
losses["Total Cases"][country] = 100000000
# Display in a pyplot table
cellText = list(map(lambda x: list(x.values()), list(losses.values())))
cellText = nparray(cellText).T.tolist()
countries = list(list(losses.values())[0].keys())
columns = list(losses.keys())
table = plt.table(
cellText=cellText,
rowLabels=countries,
colLabels=columns,
loc='center'
)
table.scale(0.4, 0.6)
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.box(on=None)
# plt.show()
# Save this table to a CSV file
for i in range(len(cellText)):
cellText[i].insert(0, countries[i])
df = | pd.DataFrame(cellText) | pandas.DataFrame |
from sys import set_asyncgen_hooks
import streamlit as st
import plotly.graph_objects as go
import pandas as pd
import numpy as np
featuresAbbrev = {'Points' : 'pts',
'Goal Scored' : 'gs_cum',
'Goal Conceded' : 'gc_cum',
'Goal difference' : 'gd',
'Form' : 'form',
'Average goals scored the 4 last game' : ['HPKG', 'APKG'],
'Average shoots on target the 4 last game' : ['HPKST', 'APKST'],
'Average corners obtained the 4 last game' : ['HPKC', 'APKC'],
'Average goals scored' : 'gs_cum',
'Average goals scored at home' : 'avgGoalSHH',
'Average goals scored away' : 'avgGoalSAA',
'Average goals conceded' : 'gc_cum',
'Average goals conceded at home' : 'avgGoalCHH',
'Average goals conceded away' : 'avgGoalCAA'
}
class Visualizer:
def __init__(self, ratings, datasets, statistics, teamsGames):
self._ratings = ratings
self._datasets = datasets
self._statistics = statistics
self._teamsGames = teamsGames
self._featuresAbbrev = featuresAbbrev
self._seasons = self.get_seasons(2005, 2022)
self._matchDay = [i for i in range(1,39)]
self._leagueStats = self.get_league_stats()
self._teamStats = self.get_team_stats()
def get_seasons(self, firstSeason, lastSeason):
return [str(year) +'/'+str(year+1) for year in range(firstSeason, lastSeason)][::-1] # reversed
def get_league_stats(self):
return ['Points', 'Goal Scored', 'Goal Conceded', 'Goal difference', 'Form',
'Average goals scored the 4 last game' ,
'Average shoots on target the 4 last game',
'Average corners obtained the 4 last game']
def get_team_stats(self):
return ['Average goals scored', 'Average goals scored at home',
'Average goals scored away', 'Average goals conceded',
'Average goals conceded at home', 'Average goals conceded away']
def get_features_abbrev(self, userInput):
return self._featuresAbbrev[userInput]
def get_teams_season(self, season):
# team selection in function of the year
year = int(season.split('/')[1])
teams = self._ratings[self._ratings['year'] == year]['name'].to_list()
teams.sort()
return teams, year
def get_league_stat(self, year, season, stat, teams):
fig = go.Figure()
data = self._teamsGames.query('year == '+str(year)+' and team == '+str(teams))
data = data.sort_values(by=['matchDay'], ascending=False)
colStat = self.get_features_abbrev(stat)
for team in teams:
teamData = data.query('team == '+str([team]))
fig.add_trace(go.Scatter(name=team ,mode='lines',
x=teamData['matchDay'], y=teamData[colStat]))
fig.update_layout(
title='Evolution of '+stat+' during season '+str(season),
xaxis_title="Match day",
yaxis_title='Number of '+stat,
font=dict(
size=14,
)
)
return fig
def get_league_average_stat(self, season, stat, teams):
fig = go.Figure()
year = int(season.split('/')[1])
features = ['id','HN', 'AN', 'HPKG', 'APKG', 'HPKST', 'APKST', 'HPKC', 'APKC', 'year']
stats = self.get_features_abbrev(stat)
data = self._datasets.query('year == '+str(year)+' and (HN == '+str(teams)+' or AN == '+str(teams)+')')[features]
for team in teams:
matchDay = [i+5 for i in range(data.shape[0])]
homeTeamData = data.query('HN == '+str([team]))[['id', stats[0]]]
homeTeamData.columns = ['id', 'AVG']
awayTeamData = data.query('AN == '+str([team]))[['id', stats[1]]]
awayTeamData.columns = ['id', 'AVG']
teamData = | pd.concat([homeTeamData, awayTeamData]) | pandas.concat |
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from src.contact_models.contact_model_functions import _draw_nr_of_contacts
from src.contact_models.contact_model_functions import _draw_potential_vacation_contacts
from src.contact_models.contact_model_functions import (
_identify_ppl_affected_by_vacation,
)
from src.contact_models.contact_model_functions import (
calculate_non_recurrent_contacts_from_empirical_distribution,
)
from src.contact_models.contact_model_functions import go_to_daily_work_meeting
from src.contact_models.contact_model_functions import go_to_weekly_meeting
from src.contact_models.contact_model_functions import meet_daily_other_contacts
from src.contact_models.contact_model_functions import reduce_contacts_on_condition
from src.shared import draw_groups
@pytest.fixture
def params():
params = pd.DataFrame()
params["category"] = ["work_non_recurrent"] * 2 + ["other_non_recurrent"] * 2
params["subcategory"] = [
"symptomatic_multiplier",
"positive_test_multiplier",
] * 2
params["name"] = ["symptomatic_multiplier", "positive_test_multiplier"] * 2
params["value"] = [0.0, 0.0, 0.0, 0.0]
params.set_index(["category", "subcategory", "name"], inplace=True)
return params
@pytest.fixture
def states():
"""states DataFrame for testing purposes.
Columns:
- date: 2020-04-01 - 2020-04-30
- id: 50 individuals, with 30 observations each. id goes from 0 to 49.
- immune: bool
- infectious: bool
- age_group: ordered Categorical, either 10-19 or 40-49.
- region: unordered Categorical, ['Overtjssel', 'Drenthe', 'Gelderland']
- n_has_infected: int, 0 to 3.
- cd_infectious_false: int, -66 to 8.
- occupation: Categorical. "working" or "in school".
- cd_symptoms_false: int, positive for the first 20 individuals, negative after.
"""
this_modules_path = Path(__file__).resolve()
states = pd.read_parquet(this_modules_path.parent / "1.parquet")
old_to_new = {old: i for i, old in enumerate(sorted(states["id"].unique()))}
states["id"].replace(old_to_new, inplace=True)
states["age_group"] = pd.Categorical(
states["age_group"], ["10 - 19", "40 - 49"], ordered=True
)
states["age_group"] = states["age_group"].cat.rename_categories(
{"10 - 19": "10-19", "40 - 49": "40-49"}
)
states["region"] = pd.Categorical(
states["region"], ["Overtjssel", "Drenthe", "Gelderland"], ordered=False
)
states["date"] = pd.to_datetime(states["date"], format="%Y-%m-%d", unit="D")
states["n_has_infected"] = states["n_has_infected"].astype(int)
states["cd_infectious_false"] = states["cd_infectious_false"].astype(int)
states["occupation"] = states["age_group"].replace(
{"10-19": "in school", "40-49": "working"}
)
states["cd_symptoms_false"] = list(range(1, 21)) + list(range(-len(states), -20))
states["symptomatic"] = states["cd_symptoms_false"] >= 0
states["knows_infectious"] = False
states["knows_immune"] = False
states["cd_received_test_result_true"] = -100
states["knows_currently_infected"] = states.eval(
"knows_infectious | (knows_immune & symptomatic) "
"| (knows_immune & (cd_received_test_result_true >= -13))"
)
states["quarantine_compliance"] = 1.0
return states
@pytest.fixture
def a_thursday(states):
a_thursday = states[states["date"] == "2020-04-30"].copy()
a_thursday["cd_symptoms_false"] = list(range(1, 21)) + list(
range(-len(a_thursday), -20)
)
a_thursday["symptomatic"] = a_thursday["cd_symptoms_false"] >= 0
a_thursday["work_recurrent_weekly"] = draw_groups(
df=a_thursday,
query="occupation == 'working'",
assort_bys=["region"],
n_per_group=20,
seed=484,
)
return a_thursday
@pytest.fixture
def no_reduction_params():
params = pd.DataFrame()
params["subcategory"] = ["symptomatic_multiplier", "positive_test_multiplier"]
params["name"] = params["subcategory"]
params["value"] = 1.0
params = params.set_index(["subcategory", "name"])
return params
# ----------------------------------------------------------------------------
def test_go_to_weekly_meeting_wrong_day(a_thursday):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
contact_params = pd.DataFrame()
group_col_name = "group_col"
day_of_week = "Saturday"
seed = 3931
res = go_to_weekly_meeting(
a_thursday, contact_params, group_col_name, day_of_week, seed
)
expected = pd.Series(False, index=a_thursday.index)
assert_series_equal(res, expected, check_names=False)
def test_go_to_weekly_meeting_right_day(a_thursday, no_reduction_params):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
res = go_to_weekly_meeting(
states=a_thursday,
params=no_reduction_params,
group_col_name="group_col",
day_of_week="Thursday",
seed=3931,
)
expected = pd.Series(False, index=a_thursday.index)
expected[:7] = True
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekend(states, no_reduction_params):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")].copy()
a_saturday["work_saturday"] = [True, True] + [False] * (len(a_saturday) - 2)
a_saturday["work_daily_group_id"] = 333
res = go_to_daily_work_meeting(a_saturday, no_reduction_params, seed=None)
expected = | pd.Series(False, index=a_saturday.index) | pandas.Series |
import urllib.request
import xmltodict, json
# import pygrib  # NOTE: extract_gribs() below relies on pygrib, so this import must be re-enabled for that function to work
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import time
import urllib.request
import xmltodict
# Query to extract parameter forecasts for one particular place (point)
#
# http://data.fmi.fi/fmi-apikey/f96cb70b-64d1-4bbc-9044-283f62a8c734/wfs?
# request=getFeature&storedquery_id=fmi::forecast::hirlam::surface::point::multipointcoverage
# &place=thessaloniki
# ¶meters="Temperature, Humidity"
#
def extract_forecasts_place(fmi_addr, my_api_key, data_format, parameters, place ):
request = "getFeature&storedquery_id=fmi::forecast::hirlam::surface::point::" + data_format
query_parameters = ""
for it in range(len(parameters)-1):
query_parameters += parameters[it] + ","
query_parameters += parameters[len(parameters)-1]
query = fmi_addr + my_api_key + "/wfs" + "?" + "request" + "=" + request + "&" + "place=" + place + "&" + "parameters=" + query_parameters
print(query, "\n")
with urllib.request.urlopen(query) as fd:
query = xmltodict.parse(fd.read())
return(query)
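# Illustrative call (the API key and place below are placeholders):
# resp = extract_forecasts_place('http://data.fmi.fi/fmi-apikey/', '<my-api-key>',
#                                'multipointcoverage', ['Temperature', 'Humidity'], 'thessaloniki')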
#---------------------------------------------------------------------------------------
# Query to extract parameter forecasts for a Region Of Interest (grid defined by bbox)
#
# Query made for FMI:
# http://data.fmi.fi/fmi-apikey/f96cb70b-64d1-4bbc-9044-283f62a8c734/wfs?
# request=getFeature&storedquery_id=fmi::forecast::hirlam::surface::grid
# & crs=EPSG::4326
# & bbox='22.890701,40.574326,23.034210,40.67855'
# & parameters=Temperature,Humidity
#
def extract_forecasts_grid(fmi_addr, my_api_key, query_request, data_format, coord_sys, bbox, parameters):
# data_format = grid
request = query_request + data_format
# coordinate system e.g. coord_sys = EPSG::4326
query_crs = coord_sys
# bbox = [22.890701,40.574326,23.034210,40.67855] --- region of Thessaloniki
query_box = ""
for j in range(len(bbox)-1):
query_box += str(bbox[j]) + ","
query_box += str(bbox[len(bbox)-1])
query_parameters = ""
for it in range(len(parameters) - 1):
query_parameters += parameters[it] + ","
query_parameters += parameters[len(parameters)-1]
query = fmi_addr + my_api_key + "/wfs" + "?" + "request" + "=" + request + "&" + \
"crs=" + query_crs + "&" + "bbox=" + query_box + "&" + "parameters=" + query_parameters
print("Query made for FMI: \n{}\n".format(query))
with urllib.request.urlopen(query) as fd:
response = xmltodict.parse(fd.read())
return(response)
#-----------------------------------------------------------------------------
# Query to extract values from a grib file in data.frame (dset)
# Columns names of data.frame are:
# ['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value']
#
def extract_gribs(dataDICT):
# gml:fileReference to key for the FTP
# path for the value we need , for downloading grib2 file
FTPurl = dataDICT['wfs:FeatureCollection']['wfs:member'][1]['omso:GridSeriesObservation']['om:result']['gmlcov:RectifiedGridCoverage']['gml:rangeSet']['gml:File']['gml:fileReference']
print("Query for downloading grb file with the values asked: \n{}\n".format(FTPurl))
# Create the grib2 file
result = urllib.request.urlopen(FTPurl)
with open('gribtest.grib2', 'b+w') as f:
f.write(result.read())
gribfile = 'gribtest.grib2' # Grib filename
grb = pygrib.open(gribfile)
# Creation of dictionary, for parameters : metric system
paremeters_units = {
"Mean sea level pressure": "Pa", "Orography": "meters", "2 metre temperature": "°C",
"2 metre relative humidity": "%",
"Mean wind direction": "degrees",
"10 metre wind speed": "m s**-1",
"10 metre U wind component": "m s**-1",
"10 metre V wind component": "m s**-1",
"surface precipitation amount, rain, convective": "kg m**-2", "2 metre dewpoint temperature": "°C"}
# Create a data frame to keep all the measurements from the grib file
dset = | pd.DataFrame(columns=['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value']) | pandas.DataFrame |
import warnings
import numpy as np
import pandas as pd
import re
import string
@pd.api.extensions.register_dataframe_accessor('zookeeper')
class ZooKeeper:
def __init__(self, pandas_obj):
# validate and assign object
self._validate(pandas_obj)
self._obj = pandas_obj
# define incorporated modules - columns consisting of others will not have the dtype changed
self._INCORPORATED_MODULES = ['builtins', 'numpy', 'pandas']
# define a possible list of null values
self._NULL_VALS = [None, np.nan, 'np.nan', 'nan', np.inf, 'np.inf', 'inf', -np.inf, '-np.inf', '', 'n/a', 'na',
'N/A', 'NA', 'unknown', 'unk', 'UNKNOWN', 'UNK']
# assign dtypes and limits
# boolean
BOOL_STRINGS_TRUE = ['t', 'true', 'yes', 'on']
BOOL_STRINGS_FALSE = ['f', 'false', 'no', 'off']
        self._BOOL_MAP_DICT = {i: True for i in BOOL_STRINGS_TRUE}
        self._BOOL_MAP_DICT.update({i: False for i in BOOL_STRINGS_FALSE})
        self._DTYPE_BOOL_BASE = np.bool_
self._DTYPE_BOOL_NULLABLE = pd.BooleanDtype()
# unsigned integers - base and nullable
self._DTYPES_UINT_BASE = [np.uint8, np.uint16, np.uint32, np.uint64]
self._DTYPES_UINT_NULLABLE = [pd.UInt8Dtype(), pd.UInt16Dtype(), pd.UInt32Dtype(), pd.UInt64Dtype()]
self._LIMIT_LOW_UINT = [np.iinfo(i).min for i in self._DTYPES_UINT_BASE]
self._LIMIT_HIGH_UINT = [np.iinfo(i).max for i in self._DTYPES_UINT_BASE]
# signed integers - base and nullable
self._DTYPES_INT_BASE = [np.int8, np.int16, np.int32, np.int64]
self._DTYPES_INT_NULLABLE = [pd.Int8Dtype(), pd.Int16Dtype(), pd.Int32Dtype(), pd.Int64Dtype()]
self._LIMIT_LOW_INT = [np.iinfo(i).min for i in self._DTYPES_INT_BASE]
self._LIMIT_HIGH_INT = [np.iinfo(i).max for i in self._DTYPES_INT_BASE]
# floats - nullable by default
self._DTYPES_FLOAT = [np.float16, np.float32, np.float64]
# datetime - nullable by default
self._DTYPE_DATETIME = np.datetime64
# string
self._DTYPE_STRING = pd.StringDtype()
# categorical - nullable by default
self._DTYPE_CATEGORICAL = pd.CategoricalDtype()
@staticmethod
def _validate(obj):
# any necessary validations here (raise AttributeErrors, etc)
# todo check isinstance(df, pd.DataFrame) and/or df.empty?
pass
# todo add other methods
"""
automate data profiling
- pandas_profiling
- missingo
- any others?
unit handling
- column unit attributes
- unit conversion
- column descriptions
automate machine learning pre-processing
- imputation
- scaling
- encoding
"""
def simplify_columns(self):
# todo add any other needed simplifications
# get columns
cols = self._obj.columns.astype('str')
# replace punctuation and whitespace with underscore
chars = re.escape(string.punctuation)
cols = [re.sub(r'[' + chars + ']', '_', col) for col in cols]
cols = ['_'.join(col.split('\n')) for col in cols]
cols = [re.sub('\s+', '_', col) for col in cols]
# drop multiple underscores to a single one
cols = [re.sub('_+', '_', col) for col in cols]
# remove trailing or leading underscores
cols = [col[1:] if col[0] == '_' else col for col in cols]
cols = [col[:-1] if col[-1] == '_' else col for col in cols]
# convert to lower case
cols = [col.lower() for col in cols]
# reassign column names
self._obj.columns = cols
def _minimize_memory_col_int(self, col):
# get range of values
val_min = self._obj[col].min()
val_max = self._obj[col].max()
# check whether signed or unsigned
bool_signed = val_min < 0
# check for null values
bool_null = np.any(pd.isna(self._obj[col]))
# get conversion lists
if bool_signed:
val_bins_lower = self._LIMIT_LOW_INT
val_bins_upper = self._LIMIT_HIGH_INT
if bool_null:
val_dtypes = self._DTYPES_INT_NULLABLE
else:
val_dtypes = self._DTYPES_INT_BASE
else:
val_bins_lower = self._LIMIT_LOW_UINT
val_bins_upper = self._LIMIT_HIGH_UINT
if bool_null:
val_dtypes = self._DTYPES_UINT_NULLABLE
else:
val_dtypes = self._DTYPES_UINT_BASE
# apply conversions
idx = max(np.where(np.array(val_bins_lower) <= val_min)[0][0],
np.where(np.array(val_bins_upper) >= val_max)[0][0])
self._obj[col] = self._obj[col].astype(val_dtypes[idx])
def _minimize_memory_col_float(self, col, tol):
        if np.sum(np.abs(self._obj[col] - self._obj[col].apply(lambda x: round(x, 0)))) == 0:
# check if they are actually integers (no decimal values)
self._minimize_memory_col_int(col)
else:
# find the smallest float dtype that has an error less than the tolerance
for i_dtype in self._DTYPES_FLOAT:
if np.abs(self._obj[col] - self._obj[col].astype(i_dtype)).max() <= tol:
self._obj[col] = self._obj[col].astype(i_dtype)
break
def reduce_memory_usage(self, tol_float=1E-6, category_fraction=0.5, drop_null_cols=True, drop_null_rows=True,
reset_index=False, print_reduction=False, print_warnings=True):
# get the starting memory usage - optional because it can add significant overhead to run time
if print_reduction:
mem_start = self._obj.memory_usage(deep=True).values.sum()
# null value handling
# apply conversions for null values
self._obj.replace(self._NULL_VALS, pd.NA, inplace=True)
# drop null columns and rows
if drop_null_cols:
self._obj.dropna(axis=1, how='all', inplace=True)
if drop_null_rows:
self._obj.dropna(axis=0, how='all', inplace=True)
# replace boolean-like strings with booleans
self._obj.replace(self._BOOL_MAP_DICT, inplace=True)
# loop by column to predict value
for i_col, i_dtype in self._obj.dtypes.to_dict().items():
# skip if column is ful of nulls and wasn't dropped
if not drop_null_cols:
if np.all(pd.isna(self._obj[i_col])):
continue
# get non-null values and the unique modules
vals_not_null = self._obj.loc[pd.notna(self._obj[i_col]), i_col].values
modules = np.unique([type(val).__module__.split('.')[0] for val in vals_not_null])
# skip if col contains non-supported modules
if np.any([val not in self._INCORPORATED_MODULES for val in modules]):
continue
# check if any null values are present
null_vals_present = np.any(pd.isna(self._obj[i_col]))
# check and assign dtypes
# todo add option to coerce small number of values and still proceed with dtype application
if pd.isna( | pd.to_numeric(vals_not_null, errors='coerce') | pandas.to_numeric |
from bert_embedding import BertEmbedding
#from bert_serving.client import BertClient
from flask import Flask, render_template, request
import os
import json
import requests
import pickle
import joblib
import numpy as np
import pandas as pd
#import tensorflow as tf
#all packages
import nltk
import string
import re
import random
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import sklearn
from sklearn.metrics.pairwise import cosine_similarity
from textblob import TextBlob
from textblob.sentiments import *
import re
from nltk.stem.wordnet import WordNetLemmatizer
lmtzr = WordNetLemmatizer()
app = Flask(__name__)
app.static_folder = 'static'
sent_bertphrase_embeddings = joblib.load('model/questionembedding.dump')
sent_bertphrase_ans_embeddings = joblib.load('model/ansembedding.dump')
stop_w = stopwords.words('english')
#bc = BertClient(ip='localhost')
df = pd.read_csv("model/20200325_counsel_chat.csv",encoding="utf-8")
def get_embeddings(texts):
url = '5110e4cb8b63.ngrok.io' #will change
headers = {
'content-type':'application/json'
}
data = {
"id":123,
"texts":texts,
"is_tokenized": False
}
data = json.dumps(data)
r = requests.post("http://"+url+"/encode", data=data, headers=headers).json()
return r['result']
def clean(column,df,stopwords=False):
df[column] = df[column].apply(str)
df[column] = df[column].str.lower().str.split()
#remove stop words
if stopwords:
df[column]=df[column].apply(lambda x: [item for item in x if item not in stop_w])
#remove punctuation
df[column]=df[column].apply(lambda x: [item for item in x if item not in string.punctuation])
df[column]=df[column].apply(lambda x: " ".join(x))
def retrieveAndPrintFAQAnswer(question_embedding,sentence_embeddings,FAQdf): #USE BOTH QUESTION AND ANSWER EMBEDDINGS FOR CS
max_sim=-1
index_sim=-1
valid_ans = []
for index,faq_embedding in enumerate(sentence_embeddings):
#sim=cosine_similarity(embedding.reshape(1, -1),question_embedding.reshape(1, -1))[0][0];
sim=cosine_similarity(faq_embedding,question_embedding)[0][0]
#print(index, sim, sentences[index])
if sim>=max_sim:
max_sim=sim
index_sim=index
valid_ans.append(index_sim) #get all possible valid answers with same confidence
#Calculate answer with highest cosine similarity
max_a_sim=-1
answer=""
for ans in valid_ans:
answer_text = FAQdf.iloc[ans,8] #answer
answer_em = sent_bertphrase_ans_embeddings[ans] #get embedding from index
similarity = cosine_similarity(answer_em,question_embedding)[0][0]
if similarity>max_a_sim:
max_a_sim = similarity
answer = answer_text
#print("\n")
#print("Question: ",question)
#print("\n");
#print("Retrieved: "+str(max_sim)+" ",FAQdf.iloc[index_sim,3]) # 3 is index for q text
#print(FAQdf.iloc[index_sim,8]) # 8 is the index for the answer text
#check confidence level
if max_a_sim<0.70:
return "Could you please elaborate your situation more? I don't really understand."
return answer
#print(answer)
def retrieve(sent_bertphrase_embeddings,example_query): # USE ONLY QUESTION/ANSWER EMBEDDINGS CS
max_=-1
max_i = -1
for index,emb in enumerate(sent_bertphrase_embeddings):
sim_score = cosine_similarity(emb,example_query)[0][0]
if sim_score>max_:
max_=sim_score
max_i=index
#print("\n");
#print("Retrieved: "+str(max_)+" ",df.iloc[max_i,3]) # 3 is index for q text
#print(df.iloc[max_i,8]) # 8 is the index for the answer text
return str(df.iloc[max_i,8])
def clean_text(greetings):
greetings = greetings.lower()
greetings = ' '.join(word.strip(string.punctuation) for word in greetings.split())
re.sub(r'\W+', '',greetings)
greetings = lmtzr.lemmatize(greetings)
return greetings
def predictor(userText):
data = [userText]
x_try = | pd.DataFrame(data,columns=['text']) | pandas.DataFrame |
from datetime import date as dt
import numpy as np
import pandas as pd
import pytest
import talib
import os
from finance_tools_py.simulation import Simulation
from finance_tools_py.simulation.callbacks import talib as cb_talib
from finance_tools_py.simulation import callbacks
@pytest.fixture
def init_global_data():
pytest.global_code = '000001'
pytest.global_data = pd.DataFrame({
'code': [pytest.global_code for x in range(1998, 2020)],
'date': [dt(y, 1, 1) for y in range(1998, 2020)],
'close':
np.random.random((len(list(range(1998, 2020))), )),
'high':
np.random.random((len(list(range(1998, 2020))), )),
'low':
np.random.random((len(list(range(1998, 2020))), )),
})
@pytest.fixture
def mock_data():
pytest.mock_code = '600036'
if "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true":
pytest.mock_data = pd.read_csv('tests/data/600036.csv', index_col=None)
else:
pytest.mock_data = pd.read_csv('data/600036.csv', index_col=None)
pytest.mock_data['date'] = pd.to_datetime(pytest.mock_data['date'])
def _mock_data(size):
return pd.DataFrame({
'close': np.random.random((len(list(range(size))), )),
'high': np.random.random((len(list(range(size))), )),
'low': np.random.random((len(list(range(size))), )),
})
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_BBANDS():
print('>>> from finance_tools_py.simulation.callbacks.talib import BBANDS')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import BBANDS
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
data = pd.DataFrame({'close': [y for y in np.arange(0.0, 8.0)]})
print(">>> print(data['close'].values)")
print(data['close'].values)
t = 3
u = 2.4
d = 2.7
print('>>> t = {}'.format(t))
print('>>> u = {}'.format(u))
print('>>> d = {}'.format(d))
print(">>> s = Simulation(data,'',callbacks=[BBANDS(t, u, d)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[BBANDS(t, u, d)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'bbands' in col]")
cols = [col for col in s.data.columns if 'bbands' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_WILLR(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import WILLR')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import WILLR
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)],\n\
'high': [y for y in range(0.1, 8.2)],\n\
'low': [y for y in range(0.2, 8.2)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(0.0, 8.0)],
'high': [y for y in np.arange(0.1, 8.1)],
'low': [y for y in np.arange(0.2, 8.2)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t={}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[WILLR(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[WILLR(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'willr' in col]")
cols = [col for col in s.data.columns if 'willr' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_CCI(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import CCI')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import CCI
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1,15.0)],\n\
'low': [y for y in range(0.0, 4.9)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[CCI(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[CCI(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'cci' in col]")
cols = [col for col in s.data.columns if 'cci' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_ATR(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import ATR')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import ATR
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1,15.0)],\n\
'low': [y for y in range(0.0, 4.9)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[ATR(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[ATR(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'atr' in col]")
cols = [col for col in s.data.columns if 'atr' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_LINEARREG_SLOPE(init_global_data):
    print('>>> from finance_tools_py.simulation.callbacks.talib import LINEARREG_SLOPE')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import LINEARREG_SLOPE
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[LINEARREG_SLOPE('close',t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[LINEARREG_SLOPE('close',t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'lineSlope' in col]")
cols = [col for col in s.data.columns if 'lineSlope' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_TRANGE(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import TRANGE')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import TRANGE
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1,15.0)],\n\
'low': [y for y in range(0.0, 4.9)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)]
})
print(">>> print(data)")
print(data)
print(">>> s = Simulation(data,'',callbacks=[TRANGE()])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[TRANGE()])
s.simulate()
print(">>> cols = [col for col in data.columns if 'trange' in col]")
cols = [col for col in s.data.columns if 'trange' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_NATR(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import NATR')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import NATR
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1,15.0)],\n\
'low': [y for y in range(0.0, 4.9)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[NATR(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[NATR(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'atr' in col]")
cols = [col for col in s.data.columns if 'natr' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_MFI(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import MFI')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import MFI
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1, 15.0)],\n\
'low': [y for y in range(0.0, 4.9)],\n\
'volume': [y for y in range(50.0, 100.0, 10.0)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)],
'volume': [y for y in np.arange(50.0, 100.0, 10.0)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[MFI(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[MFI(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'mfi' in col]")
cols = [col for col in s.data.columns if 'mfi' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_SMA(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import SMA')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import SMA
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)]})")
data = pd.DataFrame({'close': [y for y in np.arange(5.0, 10.0)]})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[SMA(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[SMA(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'sma' in col]")
cols = [col for col in s.data.columns if 'sma' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_EMA(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import EMA')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import EMA
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)]})")
data = pd.DataFrame({'close': [y for y in np.arange(5.0, 10.0)]})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[EMA(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[EMA(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'ema' in col]")
cols = [col for col in s.data.columns if 'ema' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_DEMA(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import DEMA')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import DEMA
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
data = pd.DataFrame({'close': [y for y in np.arange(0.0, 8.0)]})
print(">>> print(data['close'].values)")
print(data['close'].values)
t = 3
print('>>> t={}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[DEMA(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[DEMA(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'dema' in col]")
cols = [col for col in s.data.columns if 'dema' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_WMA(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import WMA')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import WMA
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
data = pd.DataFrame({'close': [y for y in np.arange(0.0, 8.0)]})
print(">>> print(data['close'].values)")
print(data['close'].values)
t = 3
print('>>> t={}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[WMA(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[WMA(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'dema' in col]")
cols = [col for col in s.data.columns if 'wma' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_RSI(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import RSI')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import RSI
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
data = pd.DataFrame({'close': [y for y in np.arange(0.0, 8.0)]})
print(">>> print(data['close'].values)")
print(data['close'].values)
t = 3
print('>>> t={}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[RSI(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[RSI(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'rsi' in col]")
cols = [col for col in s.data.columns if 'rsi' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_Rolling_Future(init_global_data):
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
print(">>> print(data['close'].values)")
data = pd.DataFrame({'close': [y for y in range(0, 8)]})
print(data['close'].values)
t = 3
print('>>> t={}'.format(t))
print('>>> print(Rolling_Future(t).on_preparing_data(data))')
callbacks.Rolling_Future(t).on_preparing_data(data)
print(">>> cols=[col for col in data.columns if col!='close']")
cols = [col for col in data.columns if col != 'close']
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(data[col].values, 2)))
def test_BBANDS(init_global_data):
t = 5
u = 2
d = 2
b = cb_talib.BBANDS(t, u, d)
b.on_preparing_data(pytest.global_data)
print(pytest.global_data.info())
    col_up = 'bbands_{}_{}_{}_up'.format(t, u, d)  # Bollinger Band upper band
    col_mean = 'bbands_{}_{}_{}_mean'.format(t, u, d)  # Bollinger Band middle band
    col_low = 'bbands_{}_{}_{}_low'.format(t, u, d)  # Bollinger Band lower band
assert col_up in pytest.global_data.columns
assert col_mean in pytest.global_data.columns
assert col_low in pytest.global_data.columns
up, mean, low = talib.BBANDS(pytest.global_data['close'], t, u, d)
assert pd.Series.equals(up, pytest.global_data[col_up])
assert pd.Series.equals(mean, pytest.global_data[col_mean])
assert pd.Series.equals(low, pytest.global_data[col_low])
def test_SMA(init_global_data):
t = 5
b = cb_talib.SMA(t)
b.on_preparing_data(pytest.global_data)
print(pytest.global_data.info())
col = 'sma_close_{}'.format(t)
assert col in pytest.global_data.columns
real = talib.SMA(pytest.global_data['close'], t)
assert pd.Series.equals(real, pytest.global_data[col])
def test_WMA(init_global_data):
t = 5
b = cb_talib.WMA(t)
b.on_preparing_data(pytest.global_data)
print(pytest.global_data.info())
col = 'wma_close_{}'.format(t)
assert col in pytest.global_data.columns
real = talib.WMA(pytest.global_data['close'], t)
    assert pd.Series.equals(real, pytest.global_data[col])
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF']
import numpy as np
import pandas as pd
from pyspark.context import SparkContext
from scipy.sparse import coo_matrix, spmatrix
from .classloader import *
SUPPORTED_TYPES = (np.ndarray, pd.DataFrame, spmatrix)
def getNumCols(numPyArr):
if numPyArr.ndim == 1:
return 1
else:
return numPyArr.shape[1]
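# Illustrative usage sketch (not part of the original module): getNumCols treats a
# 1-D array as a single column and otherwise reports the second dimension.
# >>> getNumCols(np.array([1.0, 2.0, 3.0]))
# 1
# >>> getNumCols(np.zeros((4, 3)))
# 3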
def convertToLabeledDF(sqlCtx, X, y=None):
from pyspark.ml.feature import VectorAssembler
if y is not None:
pd1 = pd.DataFrame(X)
pd2 = pd.DataFrame(y, columns=['label'])
pdf = pd.concat([pd1, pd2], axis=1)
inputColumns = ['C' + str(i) for i in pd1.columns]
outputColumns = inputColumns + ['label']
else:
        pdf = pd.DataFrame(X)
# -*- coding: utf-8 -*-
"""
Created on Mon May 14 17:29:16 2018
@author: jdkern
"""
from __future__ import division
import pandas as pd
import numpy as np
def exchange(year):
df_data = pd.read_csv('../Time_series_data/Synthetic_demand_pathflows/Sim_daily_interchange.csv',header=0)
paths = ['SALBRYNB', 'ROSETON', 'HQ_P1_P2', 'HQHIGATE', 'SHOREHAM', 'NORTHPORT']
df_data = df_data[paths]
df_data = df_data.loc[year*365:year*365+364,:]
# select dispatchable imports (positive flow days)
imports = df_data
imports = imports.reset_index()
for p in paths:
for i in range(0,len(imports)):
if imports.loc[i,p] < 0:
imports.loc[i,p] = 0
else:
pass
imports_total = imports.copy()
imports_total.rename(columns={'SALBRYNB':'NB_imports_ME'}, inplace=True)
imports_total['HYDRO_QUEBEC'] = imports_total['HQ_P1_P2'] + imports_total['HQHIGATE']
imports_total['NEWYORK'] = imports_total['ROSETON'] + imports_total['SHOREHAM'] + imports_total['NORTHPORT']
imports_total['NY_imports_CT'] = imports_total['NEWYORK'].mul(4).div(9)
imports_total['NY_imports_WCMA'] = imports_total['NEWYORK'].div(9)
imports_total['NY_imports_VT'] = imports_total['NEWYORK'].mul(4).div(9)
imports_total.rename(columns={'HYDRO_QUEBEC':'HQ_imports_VT'}, inplace=True)
del imports_total['ROSETON']
del imports_total['HQ_P1_P2']
del imports_total['HQHIGATE']
del imports_total['SHOREHAM']
del imports_total['NORTHPORT']
del imports_total['NEWYORK']
imports_total.to_csv('Path_setup/NEISO_dispatchable_imports.csv')
# hourly exports
df_data = pd.read_csv('../Time_series_data/Synthetic_demand_pathflows/Sim_daily_interchange.csv',header=0)
df_data = df_data[paths]
df_data = df_data.loc[year*365:year*365+364,:]
df_data = df_data.reset_index()
e = np.zeros((8760,len(paths)))
#SALBRYNB
    path_profiles = pd.read_excel('Path_setup/NEISO_path_export_profiles.xlsx', sheet_name='SALBRYNB', header=None)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 23:24:11 2021
@author: rayin
"""
import os, sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import re
import random
from collections import Counter
from pprint import pprint
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
case_gene_update = pd.read_csv("data/processed/variant_clean.csv", index_col=0)
aa_variant = list(case_gene_update['\\12_Candidate variants\\09 Protein\\'])
#pd.DataFrame(aa_variant).to_csv('aa_variant.csv')
#aa_variant_update = pd.read_csv("data/processed/aa_variant_update.csv", index_col=0)
#aa_variant_update = list(aa_variant_update['\\12_Candidate variants\\09 Protein\\'])
amino_acid = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K', 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W', 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M', 'TER': 'X'}
aa_3 = []
aa_1 = []
for i in amino_acid.keys():
aa_3.append(i)
aa_1.append(amino_acid[i])
for i in range(0, len(aa_variant)):
for j in range(len(aa_3)):
if isinstance(aa_variant[i], float):
break
aa_variant[i] = str(aa_variant[i].upper())
if aa_3[j] in aa_variant[i]:
aa_variant[i] = aa_variant[i].replace(aa_3[j], aa_1[j])
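# Illustrative example of the substitution above (hypothetical variant string, not from the data):
# an annotation such as 'p.Arg123Ter' is upper-cased to 'P.ARG123TER' and then rewritten to
# 'P.R123X' by the three-letter -> one-letter replacements (ARG -> R, TER -> X).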
#extracting aa properties from aaindex
#https://www.genome.jp/aaindex/
aa = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
#RADA880108
polarity = [-0.06, -0.84, -0.48, -0.80, 1.36, -0.73, -0.77, -0.41, 0.49, 1.31, 1.21, -1.18, 1.27, 1.27, 0.0, -0.50, -0.27, 0.88, 0.33, 1.09]
aa_polarity = pd.concat([pd.Series(aa), pd.Series(polarity)], axis=1)
aa_polarity = aa_polarity.rename(columns={0:'amino_acid', 1: 'polarity_value'})
#KLEP840101
net_charge = [0, 1, 0, -1, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
aa_net_charge = pd.concat([pd.Series(aa), pd.Series(net_charge)], axis=1)
aa_net_charge = aa_net_charge.rename(columns={0:'amino_acid', 1: 'net_charge_value'})
#CIDH920103
hydrophobicity = [0.36, -0.52, -0.90, -1.09, 0.70, -1.05, -0.83, -0.82, 0.16, 2.17, 1.18, -0.56, 1.21, 1.01, -0.06, -0.60, -1.20, 1.31, 1.05, 1.21]
aa_hydrophobicity = pd.concat([pd.Series(aa), pd.Series(hydrophobicity)], axis=1)
aa_hydrophobicity = aa_hydrophobicity.rename(columns={0:'amino_acid', 1: 'hydrophobicity_value'})
#FAUJ880103 -- Normalized van der Waals volume
normalized_vdw = [1.00, 6.13, 2.95, 2.78, 2.43, 3.95, 3.78, 0.00, 4.66, 4.00, 4.00, 4.77, 4.43, 5.89, 2.72, 1.60, 2.60, 8.08, 6.47, 3.00]
aa_normalized_vdw = pd.concat([pd.Series(aa), pd.Series(normalized_vdw)], axis=1)
aa_normalized_vdw = aa_normalized_vdw.rename(columns={0:'amino_acid', 1: 'normalized_vdw_value'})
#CHAM820101
polarizability = [0.046, 0.291, 0.134, 0.105, 0.128, 0.180, 0.151, 0.000, 0.230, 0.186, 0.186, 0.219, 0.221, 0.290, 0.131, 0.062, 0.108, 0.409, 0.298, 0.140]
aa_polarizability = pd.concat([pd.Series(aa), pd.Series(polarizability)], axis=1)
aa_polarizability = aa_polarizability.rename(columns={0:'amino_acid', 1: 'polarizability_value'})
#JOND750102
pK_COOH = [2.34, 1.18, 2.02, 2.01, 1.65, 2.17, 2.19, 2.34, 1.82, 2.36, 2.36, 2.18, 2.28, 1.83, 1.99, 2.21, 2.10, 2.38, 2.20, 2.32]
aa_pK_COOH = pd.concat([pd.Series(aa), pd.Series(pK_COOH)], axis=1)
aa_pK_COOH = aa_pK_COOH.rename(columns={0:'amino_acid', 1: 'pK_COOH_value'})
#FASG760104
pK_NH2 = [9.69, 8.99, 8.80, 9.60, 8.35, 9.13, 9.67, 9.78, 9.17, 9.68, 9.60, 9.18, 9.21, 9.18, 10.64, 9.21, 9.10, 9.44, 9.11, 9.62]
aa_pK_NH2 = pd.concat([pd.Series(aa), pd.Series(pK_NH2)], axis=1)
aa_pK_NH2 = aa_pK_NH2.rename(columns={0:'amino_acid', 1: 'pK_NH2_value'})
#ROBB790101 Hydration free energy
hydration = [-1.0, 0.3, -0.7, -1.2, 2.1, -0.1, -0.7, 0.3, 1.1, 4.0, 2.0, -0.9, 1.8, 2.8, 0.4, -1.2, -0.5, 3.0, 2.1, 1.4]
aa_hydration = pd.concat([pd.Series(aa), pd.Series(hydration)], axis=1)
aa_hydration = aa_hydration.rename(columns={0:'amino_acid', 1: 'hydration_value'})
#FASG760101
molecular_weight = [89.09, 174.20, 132.12, 133.10, 121.15, 146.15, 147.13, 75.07, 155.16, 131.17, 131.17, 146.19, 149.21, 165.19,
115.13, 105.09, 119.12, 204.24, 181.19, 117.15]
aa_molecular_weight = pd.concat([pd.Series(aa), pd.Series(molecular_weight)], axis=1)
aa_molecular_weight = aa_molecular_weight.rename(columns={0:'amino_acid', 1: 'molecular_weight_value'})
#FASG760103
optical_rotation = [1.80, 12.50, -5.60, 5.05, -16.50, 6.30, 12.00, 0.00, -38.50, 12.40, -11.00, 14.60, -10.00, -34.50, -86.20,
-7.50, -28.00, -33.70, -10.00, 5.63]
aa_optical_rotation = pd.concat([pd.Series(aa), pd.Series(optical_rotation)], axis=1)
aa_optical_rotation = aa_optical_rotation.rename(columns={0:'amino_acid', 1: 'optical_rotation_value'})
#secondary structure #LEVJ860101
#https://pybiomed.readthedocs.io/en/latest/_modules/CTD.html#CalculateCompositionSolventAccessibility
#SecondaryStr = {'1': 'EALMQKRH', '2': 'VIYCWFT', '3': 'GNPSD'}
# '1'stand for Helix; '2'stand for Strand, '3' stand for coil
secondary_structure = [1, 1, 3, 3, 2, 1, 1, 3, 1, 2, 1, 1, 1, 2, 3, 3, 2, 2, 2, 2]
aa_secondary_structure = pd.concat([pd.Series(aa), pd.Series(secondary_structure)], axis=1)
aa_secondary_structure = aa_secondary_structure.rename(columns={0:'amino_acid', 1: 'secondary_structure_value'})
#_SolventAccessibility = {'-1': 'ALFCGIVW', '1': 'RKQEND', '0': 'MPSTHY'}
# '-1'stand for Buried; '1'stand for Exposed, '0' stand for Intermediate
solvent_accessibility = [-1, 1, 1, 1, -1, 1, 1, -1, 0, -1, -1, 1, 0, -1, 0, 0, 0, -1, 0, -1]
aa_solvent_accessibility = pd.concat([pd.Series(aa), pd.Series(solvent_accessibility)], axis=1)
aa_solvent_accessibility = aa_solvent_accessibility.rename(columns={0:'amino_acid', 1: 'solvent_accessibility_value'})
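# Sketch (assumption, not in the original script): every per-property frame above shares the
# 'amino_acid' key, so they could be merged into a single lookup table if needed, e.g.:
# from functools import reduce
# aa_properties = reduce(lambda left, right: pd.merge(left, right, on='amino_acid'),
#                        [aa_polarity, aa_net_charge, aa_hydrophobicity, aa_normalized_vdw])
# print(aa_properties.head())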
############################################################################################################################################
#CHAM820102 Free energy of solution in water
free_energy_solution = [-0.368, -1.03, 0.0, 2.06, 4.53, 0.731, 1.77, -0.525, 0.0, 0.791, 1.07, 0.0, 0.656, 1.06, -2.24, -0.524, 0.0, 1.60, 4.91, 0.401]
aa_free_energy_solution = pd.concat([pd.Series(aa), pd.Series(free_energy_solution)], axis=1)
aa_free_energy_solution = aa_free_energy_solution.rename(columns={0:'amino_acid', 1: 'free_energy_solution_value'})
#FAUJ880109 Number of hydrogen bond donors
number_of_hydrogen_bond = [0, 4, 2, 1, 0, 2, 1, 0, 1, 0, 0, 2, 0, 0, 0, 1, 1, 1, 1, 0]
aa_number_of_hydrogen_bond = pd.concat([pd.Series(aa), pd.Series(number_of_hydrogen_bond)], axis=1)
aa_number_of_hydrogen_bond = aa_number_of_hydrogen_bond.rename(columns={0:'amino_acid', 1: 'number_of_hydrogen_bond_value'})
#PONJ960101 Average volumes of residues
volumes_of_residues = [91.5, 196.1, 138.3, 135.2, 114.4, 156.4, 154.6, 67.5, 163.2, 162.6, 163.4, 162.5, 165.9, 198.8, 123.4, 102.0, 126.0, 209.8, 237.2, 138.4]
aa_volumes_of_residues = pd.concat([pd.Series(aa), pd.Series(volumes_of_residues)], axis=1)
aa_volumes_of_residues = aa_volumes_of_residues.rename(columns={0:'amino_acid', 1: 'volumes_of_residues_value'})
#JANJ790102
transfer_free_energy = [0.3, -1.4, -0.5, -0.6, 0.9, -0.7, -0.7, 0.3, -0.1, 0.7, 0.5, -1.8, 0.4, 0.5, -0.3, -0.1, -0.2, 0.3, -0.4, 0.6]
aa_transfer_free_energy = pd.concat([pd.Series(aa), pd.Series(transfer_free_energy)], axis=1)
aa_transfer_free_energy = aa_transfer_free_energy.rename(columns={0:'amino_acid', 1: 'transfer_free_energy_value'})
#WARP780101 amino acid side-chain interactions in 21 proteins
side_chain_interaction = [10.04, 6.18, 5.63, 5.76, 8.89, 5.41, 5.37, 7.99, 7.49, 8.7, 8.79, 4.40, 9.15, 7.98, 7.79, 7.08, 7.00, 8.07, 6.90, 8.88]
aa_side_chain_interaction = pd.concat([pd.Series(aa), pd.Series(side_chain_interaction)], axis=1)
aa_side_chain_interaction = aa_side_chain_interaction.rename(columns={0:'amino_acid', 1: 'side_chain_interaction_value'})
#KARS160101
number_of_vertices = [2.00, 8.00, 5.00, 5.00, 3.00, 6.00, 6.00, 1.00, 7.00, 5.00, 5.00, 6.00, 5.00, 8.00, 4.00, 3.00, 4.00, 11.00, 9.00, 4.00]
aa_number_of_vertices = pd.concat([pd.Series(aa), pd.Series(number_of_vertices)], axis=1)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import subprocess
import os.path
import time
import sys
import pandas as pd
work_path = sys.path[0]+'/work_space'
def hetero_to_homo(filepath,jobid,role,garblers):
"""异构图转同构图,并存储"""
hetero_df = pd.read_csv(filepath,index_col=None,header=None,sep=" ")
fold_name = os.path.join(work_path,jobid,role,f'input')
# print(fold_name)
if not os.path.exists(fold_name):
os.makedirs(fold_name)
edge_and_lenght = {}
vertex_df = hetero_df[hetero_df.iloc[:,2]==0]
edge_types = set(hetero_df.iloc[:,2].tolist())-set([0])
for e in edge_types:
temp_df = hetero_df[hetero_df.iloc[:,2]==e]
temp_df = pd.concat([temp_df,vertex_df],axis=0)
if (temp_df.shape[0])%garblers!=0:
zero_df = pd.DataFrame([[0,0,0] for i in range(garblers-temp_df.shape[0]%garblers)])
temp_df = pd.concat([temp_df,zero_df],axis=0)
temp_df.to_csv(fold_name+f"/edgeType{e}.csv",index=None,columns=None,header=None,sep=" ")
edge_and_lenght[e] = temp_df.shape[0]
return edge_and_lenght
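# Example of the padding behaviour above (illustrative numbers, not from a real job): if one
# edge type has 10 rows after the vertex rows are appended and garblers == 4, two all-zero rows
# are added so the per-edge CSV length (12) is divisible by garblers, and edge_and_lenght maps
# that edge type to 12.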
if __name__ == "__main__":
import click
@click.command()
@click.option("--jobid", required=True, type=str, help="jobid")
@click.option("--experiments", required=True, type=str, help="任务类型")
@click.option("--role", required=True, type=str, help="角色guest/host")
@click.option("--input_length", required=True, type=int, help="输入数据长度'")
@click.option("--garblers", required=False, type=int, default=1, help="进程数,默认单进程")
@click.option("--num_of_edge_type", required=False, type=int, default=1, help="边类型数目'")
def start(jobid, experiments,role, input_length,garblers,num_of_edge_type):
print(experiments.split('.'))
assert input_length>0
edge_and_lenght = hetero_to_homo(f"./in/HeteroGraph{input_length}.in",jobid,role,garblers)
if role == 'guest':
subprocess.call("./clear_ports.sh", shell=True)
inputs_length_str = ','.join([str(edge_and_lenght[edge]) for edge in range(1,num_of_edge_type+1)])
print(inputs_length_str)
if role == 'guest':
# subprocess.call("./clear_ports.sh", shell=True)
params = str(garblers) + " " + inputs_length_str + " " + experiments + " 00 REAL false " +str(num_of_edge_type)+" "+jobid
print(params)
print('role guest Alice')
subprocess.call(["./run_garblers.sh " + params], shell=True)
elif role=='host':
params = str(garblers) + " " + inputs_length_str + " " + experiments + " 00 REAL false " +str(num_of_edge_type)+" "+jobid
print(params)
print('role host Bob')
subprocess.call(["./run_evaluators.sh " + params], shell=True)
if role == 'guest':
res_df = None
for experiment in experiments.split(","):
for EdgeType in range(1,num_of_edge_type+1):
task = experiment.split('.')[-1]
fold = os.path.join(work_path,jobid,role,f'result/edgeType{EdgeType}',task)
for garbid in range(garblers):
path = fold+f'{garbid}.csv'
try:
                            df_of_garbid = pd.read_csv(path, index_col=None, header=None, sep=" ", engine="python")
from ctypes import sizeof
import traceback
from matplotlib.pyplot import axis
import pandas as pd
import numpy as np
from datetime import datetime
from time import sleep
from tqdm import tqdm
import random
import warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.datasets import load_linnerud, make_multilabel_classification
from sklearn.multioutput import MultiOutputClassifier, MultiOutputRegressor
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import LinearSVR
from sklearn.neural_network import MLPRegressor, MLPClassifier
# Functions
# ======================================================================
def getClassificationModelType(class_model_type, **kwargs):
if class_model_type == "svm": return MultiOutputClassifier(svm.SVC(kernel='rbf', **kwargs)) # binary classification model
if class_model_type == "random_forest": return RandomForestClassifier(max_depth=2, random_state=0, **kwargs) # binary classification model
if class_model_type == "ann": return MultiOutputClassifier(MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(8, 8), random_state=1, max_iter=10000, **kwargs))
def getColDataTypes(data_df, discrete_info_df):
return [col for col in data_df if discrete_info_df[col]['discrete']], [col for col in data_df if not discrete_info_df[col]['discrete']]
def getEdgeData(data_df, cols):
return data_df[cols]
def getHeartData():
df = pd.read_csv("data/heart.csv")
df.set_index(keys='ID', inplace=True)
return df
def getHeartInfo():
df = pd.read_csv("data/heart.info")
df.set_index(keys='info', inplace=True)
return df
def getMeanSquaredError(y_pred_df, y_df):
return round(mean_squared_error(y_pred=y_pred_df, y_true=y_df), 7)
def getModelAccuracy(y_pred_df, y_df):
return accuracy_score(y_true=y_df, y_pred=y_pred_df)
def getRegressionModelType(reg_model_type, **kwargs):
if reg_model_type == "ridge": return MultiOutputRegressor(Ridge(random_state=123, **kwargs))
if reg_model_type == "random_forest": return RandomForestRegressor(max_depth=2, random_state=0, **kwargs)
if reg_model_type == "k_neighbors": return KNeighborsRegressor(n_neighbors=2, **kwargs)
if reg_model_type == "svr": return MultiOutputRegressor(LinearSVR(random_state=0, tol=1e-05, max_iter=100000, **kwargs))
if reg_model_type == "ann": return MLPRegressor(solver='adam', alpha=1e-5, hidden_layer_sizes=(10, 10), random_state=1, max_iter=100000, **kwargs)
def getSampleData(data_df):
# n = 62,500
# training: 50,000
# testing: 12,500
return data_df.sample(n=62500, random_state=random.randint(a=0, b=2e9))
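# Note on the sizes above (arithmetic only): with n=62,500 sampled rows and the test_size=0.2
# split used in data_prediction below, train_test_split yields 50,000 training rows and
# 12,500 testing rows (0.8 * 62,500 and 0.2 * 62,500).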
def main():
# run_simulation(5)
data_collected_1_df = pd.read_csv('data/data_collection_1.csv', index_col=['ID'])
data_collected_1_df.drop(columns=['chest'], inplace=True)
data_collected_2_df = pd.read_csv('data/data_collection_2.csv', index_col=['ID'])
data_collected_2_df.drop(columns=['chest'], inplace=True)
data_prediction([data_collected_1_df, data_collected_2_df])
def modelFit(model, X, y):
try:
# print("Fitting model...")
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "")
model.fit(X, y)
except Exception:
print(traceback.print_exc)
# print("Fitting model using ravel()...")
# print(y.ravel())
model.fit(X, y.ravel())
def fitClassificationFeatures(X, y):
# edge features
models = []
y_dfs = []
# model_data_type = 'classification'
# print("Fitting", model_data_type, "models...")
for model_name in ['svm', 'random_forest', 'ann']:
y_temp = y
# print("Fitting", model_name, "...")
if model_name=='ann':
model = getClassificationModelType(model_name)
else:
if model_name=='svm': # pseudo-classification
model = getRegressionModelType('svr')
elif model_name=='random_forest': # pseudo-classification
model = getRegressionModelType(model_name)
y_df = pd.DataFrame(y)
y_dum_df = pd.get_dummies(y_df, columns=y.columns, prefix=y.columns)
y = y_dum_df
# print(y.head())
y_dfs.append(y)
modelFit(model, X, y)
models.append(model)
y = y_temp
# print("Finished edge features classification model fitting...")
    return models, y_dfs # fitted classification models of edge features
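# Sketch of the pseudo-classification trick above (toy column name and values are assumptions):
# a discrete target column such as thal = [3, 6, 7] is one-hot encoded by pd.get_dummies into
# thal_3 / thal_6 / thal_7 columns, a regressor is fitted on those 0/1 columns, and
# predictClassificationFeatures below recovers the label via idxmax over the dummy columns.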
def predictClassificationFeatures(models, X, y, discrete_cols, results_cols):
results_df = pd.DataFrame()
gen_cols = []
# edge features generating discrete features
model_data_type ='classification'
# print("Predicting", model_data_type, "models...")
model_names = ['svm', 'random_forest', 'ann']
for i in range(len(model_names)):
model_name = model_names[i]
# print("Predicting", model_name, "...")
model = models[i]
# print(model)
y_cols = pd.get_dummies(y, columns=y.columns, prefix=y.columns).columns \
if model_name=='svm' or model_name=='random_forest' else y.columns
heart_gen_prime_df = pd.DataFrame(model.predict(X), columns=y_cols, index=y.index)
if model_name=='svm' or model_name=='random_forest': # binary
for y_col in discrete_cols:
y_pred_cols = [y_pred_col for y_pred_col in heart_gen_prime_df.columns if y_pred_col.startswith(y_col+"_")]
y_pred_cols_df = heart_gen_prime_df[y_pred_cols]
y_pred_cols_df.columns = [y_pred_col.split(y_col+"_", 1)[1] for y_pred_col in y_pred_cols]
heart_gen_prime_df[y_col] = y_pred_cols_df[y_pred_cols_df.columns].idxmax(axis=1)
heart_gen_prime_df.drop(columns=y_pred_cols, inplace=True)
gen_cols.append(heart_gen_prime_df)
# UNCOMMENT
# print('expected')
# print(y[discrete_cols].head(10))
# print([len(y[col].unique()) for col in discrete_cols])
# print('predicted')
# print(heart_gen_prime_df.head(10))
# print([len(heart_gen_prime_df[col].unique()) for col in heart_gen_prime_df.columns])
if isinstance(heart_gen_prime_df, object): # and isinstance(y, np.int64):
# print('convert y_pred_df int64')
heart_gen_prime_df = heart_gen_prime_df.astype('int64')
if isinstance(heart_gen_prime_df, np.int32) and isinstance(y, np.float64):
# print('convert y_pred_df float')
heart_gen_prime_df = heart_gen_prime_df.astype('float64')
accuracy = [getModelAccuracy(y_pred_df=heart_gen_prime_df[col], y_df=y[col]) for col in y.columns]
results_df = results_df.append(pd.DataFrame([model_data_type, model_name, accuracy]).transpose())
results_df.reset_index(drop=True, inplace=True)
results_df.columns = results_cols
# print("gen_class_cols_results_df:")
# print(results_df)
return gen_cols
def fitRegressionFeatures(X, y):
# edge features
models = []
y_dfs = []
# model_data_type = 'regression'
# print("Fitting", model_data_type, "models...")
for model_name in ['ridge', 'random_forest', 'svr', 'ann']:
# print("Fitting", model_name, "...")
model = getRegressionModelType(model_name)
y_dfs.append(y)
modelFit(model, X, y)
models.append(model)
# print("Finished edge features regression model fitting...")
return models # fitted regression models of edge features
def predictRegressionFeatures(models, X, y, results_cols):
results_df = pd.DataFrame()
gen_cols = []
# edge features generating continuous features
model_data_type ='regression'
# print("Predicting", model_data_type, "models...")
model_names = ['ridge', 'random_forest', 'svr', 'ann']
for i in range(len(model_names)):
model_name = model_names[i]
# print("Predicting", model_name, "...")
model = models[i]
heart_gen_prime_df = pd.DataFrame(model.predict(X), columns=y.columns, index=y.index)
mse = [getMeanSquaredError(y_pred_df=heart_gen_prime_df[col], y_df=y[col]) for col in y.columns]
results_df = results_df.append(pd.DataFrame([model_data_type, model_name, mse]).transpose())
gen_cols.append(heart_gen_prime_df)
results_df.reset_index(drop=True, inplace=True)
results_df.columns = results_cols
# print("gen_reg_cols_results_df:")
# print(results_df)
return gen_cols
def fitAllFeatures(X, y):
# all 13 features
models = []
# model_data_type = 'classification'
# print("Fitting", "models...")
for model in ['svm', 'random_forest', 'ann']:
# print("Fitting", model, "...")
model = getClassificationModelType(model)
modelFit(model, X, y)
models.append(model)
# print("Finished all features classification model fitting...")
return models # fitted classification models of all 13 features
def predictAllFeatures(models, X, y, results_cols):
results_df = pd.DataFrame()
# all 13 features
model_data_type ='classification'
# print("Predicting", model_data_type, "models...")
model_names = ['svm', 'random_forest', 'ann']
for i in range(len(model_names)):
model_name = model_names[i]
# print("Predicting", model_name, "...")
model = models[i]
y_prime_df = pd.DataFrame(model.predict(X), index=y.index)
accuracy = getModelAccuracy(y_pred_df=y_prime_df, y_df=y)
results_df = results_df.append(pd.DataFrame([model_data_type, model_name, accuracy]).transpose())
results_df.reset_index(drop=True, inplace=True)
results_df.columns = results_cols
# print("results_df:")
# print(results_df)
return results_df
def data_prediction(data_collected_dfs):
heart_data_df = getSampleData(getHeartData())
heart_label_df = pd.DataFrame(heart_data_df['class'])
heart_info_df = getHeartInfo()
for df in [heart_data_df, heart_info_df]: df.drop(columns=['class'], inplace=True)
discrete_cols, continuous_cols = getColDataTypes(data_df=heart_data_df, discrete_info_df=heart_info_df)
heart_data_continuous_df = heart_data_df[continuous_cols]
heart_data_discrete_df = heart_data_df[discrete_cols]
# normalizes continuous features
heart_data_continuous_df = (heart_data_continuous_df-heart_data_continuous_df.min())/(heart_data_continuous_df.max()-heart_data_continuous_df.min())
# recombines normalized continuous features with regression features
heart_data_df = pd.concat([heart_data_continuous_df, heart_data_discrete_df], axis=1)
# splits data into training and testing dataframes
X_heart_train_df, X_heart_test_df, y_heart_train_df, y_heart_test_df = train_test_split(heart_data_df, heart_label_df, test_size = 0.2, random_state=random.randint(a=0, b=2e9), shuffle=True)
# fits on training data and all 13 features
models_all_feat = fitAllFeatures(X=X_heart_train_df, y=y_heart_train_df)
edge_cols = ['age',
'sex',
'resting_blood_pressure',
'fasting_blood_sugar',
'resting_electrocardiographic_results',
'maximum_heart_rate_achieved',
'exercise_induced_angina']
# edge data collection
heart_edge_train_df = getEdgeData(data_df=X_heart_train_df, cols=edge_cols)
heart_edge_test_df = getEdgeData(data_df=X_heart_test_df, cols=edge_cols)
# expected generated columns
heart_gen_train_df = X_heart_train_df.drop(columns=edge_cols)
heart_gen_test_df = X_heart_test_df.drop(columns=edge_cols)
discrete_cols, continuous_cols = getColDataTypes(data_df=heart_gen_test_df, discrete_info_df=heart_info_df)
y = heart_gen_train_df[discrete_cols]
# combine dataframes
    data_collected_df = pd.concat(data_collected_dfs, axis=0)
#!/usr/bin/python3
# # Data Indexer
# This script sweeps the file index and consolidate channel and site information.
# - Read files on designated folder
# Import standard libraries
import pandas as pd
import h5py
# Import specific libraries used by the cortex system
import h5_spectrum as H5
import cortex_names as cn
import cortex_lib as cl
def _main():
index_store = pd.HDFStore(cn.FOLDER_TO_STORE_FILES+'/'+cn.INDEX_FILENAME)
file_index = index_store[cn.FILE_INDEX]
index_length = len(file_index.index)-1
# create empty dataframe to store the resulting profile
    profile_array_result = pd.DataFrame()
from gensim import corpora
import gensim
from gensim.matutils import hellinger
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
from IPython.core.display import HTML
from collections import defaultdict
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
import pprint
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
from pyvis.network import Network
from weasyprint import HTML
class TopicModeler:
def CheckOverlaps(self, dist_tolerance, parsed_dict, topic_dict):
overlaps_graph = {}
overlaps_print = {}
for doc1 in parsed_dict:
#print(doc1)
for index_doc1, topics_doc1 in enumerate(parsed_dict[doc1]):
#print(" ", topics_doc1)
#print('------------------')
for doc2 in parsed_dict:
if doc1 == doc2:
break
for index_doc2, topics_doc2 in enumerate(parsed_dict[doc2]):
dist = hellinger(topics_doc1, topics_doc2)
if(dist <= dist_tolerance):
doc1_topic_graph = doc1 + ': Topic ' + str(index_doc1 + 1)
doc2_topic_graph = doc2 + ': Topic ' + str(index_doc2 + 1)
doc1_topic_print = self.GetNestedElement(topic_dict, doc1, index_doc1)
doc2_topic_print = self.GetNestedElement(topic_dict, doc2, index_doc2)
try:
overlaps_graph[(doc1_topic_graph)] += [(doc2_topic_graph, dist)]
overlaps_print[(doc1 + ': Topic ' + str(index_doc1 + 1), doc1_topic_print)] += [(doc2 + ': Topic ' + str(index_doc2 + 1), doc2_topic_print)]
except KeyError:
overlaps_graph[(doc1_topic_graph)] = [(doc2_topic_graph, dist)]
overlaps_print[(doc1 + ': Topic ' + str(index_doc1 + 1), doc1_topic_print)] = [(doc2 + ': Topic ' + str(index_doc2 + 1), doc2_topic_print)]
return overlaps_graph, overlaps_print
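    # Illustrative note on the hellinger comparison above (toy vectors, values assumed): gensim's
    # hellinger accepts sparse (token_id, weight) vectors, so for example
    # hellinger([(0, 0.5), (1, 0.5)], [(0, 0.5), (1, 0.5)]) == 0.0, and only topic pairs whose
    # distance is <= dist_tolerance are recorded in overlaps_graph / overlaps_print.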
def CreateCorporaDocSpecific(self, num_topics, num_passes, word_list, num_words):
parsed_topics = []
cleaned_topics = []
if len(word_list) < 1:
return parsed_topics, cleaned_topics
word_dictionary = corpora.Dictionary([word_list])
corpus = [word_dictionary.doc2bow(text) for text in [word_list]]
lda_model = gensim.models.ldamodel.LdaModel(corpus, num_topics=num_topics, id2word=word_dictionary, passes=num_passes)
topics = lda_model.show_topics(num_words=num_words)
for topic in topics:
words = []
vectored_topic = []
topic_num, topic = topic
topic = topic.split('+')
for word in topic:
prob, word = word.split('*')
topic_word = word.replace(" ", "").replace('"', '')
words.append(topic_word)
word = lda_model.id2word.doc2bow([topic_word])[0][0]
vectored_topic.append((word, float(prob)))
parsed_topics.append(vectored_topic)
cleaned_topics.append(words)
return cleaned_topics, parsed_topics
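    # Sketch of the topic-string parsing above (assumes gensim's default '0.100*"word" + ...' format):
    # a topic string like '0.100*"network" + 0.095*"protocol"' is split on '+' and '*' so that
    # cleaned_topics collects ['network', 'protocol'] while parsed_topics collects the matching
    # (token_id, probability) pairs used by the hellinger comparison in CheckOverlaps.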
def GetNestedElement(self, topic_dict, key, index):
topics = topic_dict.get(key)
topic = ' '.join(topics[index])
return topic
def CreateGraph(self, graph_type, overlaps):
overlap_frame = pd.DataFrame(columns=['Source', 'Target', 'Type', 'Weight'])
for overlap in overlaps:
for sub_lap in overlaps[overlap]:
overlap_frame = overlap_frame.append({
'Source' : overlap,
'Target' : sub_lap[0],
'Type' : 'directed',
'Weight' : ((1 - sub_lap[1]) / 25)
}, ignore_index=True)
net = Network(height='100%', width='100%', directed=True)
sources = overlap_frame['Source']
targets = overlap_frame['Target']
weights = overlap_frame['Weight']
edge_data = zip(sources, targets, weights)
graph = nx.DiGraph()
for index, e in enumerate(edge_data):
src = e[0]
dst = e[1]
w = e[2]
if(graph_type == 'NetworkX'):
graph.add_node(src)
graph.add_node(dst)
graph.add_edge(src, dst, weights=w)
else:
net.add_node(src, src, title=src, physics=False, group=index, arrowStrikethrough=False)
net.add_node(dst, dst, title=dst, physics=False, group=index, arrowStrikethrough=False)
net.add_edge(src, dst, value=w, physics=False)
if(graph_type == 'PyVis'):
options = {
'layout': {
'hierarchical': {
'enabled': True,
'levelSeparation': 50,
'treeSpacing': 75,
'nodeSpacing': 500,
'edgeMinimization': False
}
}
}
net.options = options
connections = net.get_adj_list()
for node in net.nodes:
node['size'] = len(connections[node['id']]) / 3
node['title'] += ' Neighbors: <br>' + '<br>'.join(connections[node['id']])
node['value'] = len(connections[node['id']])
net.from_nx(graph)
net.show('SimilarityVisualizationGraph.html')
else:
degrees = [val * 10 for (node, val) in graph.degree()]
pos = nx.circular_layout(graph)
nx.draw(graph, pos, node_size=degrees, with_labels=True, font_size=8)
plt.show()
def PrintOverlapPdf(self, overlaps, topic_dict):
overlap_frame = pd.DataFrame(columns=['Course', 'Topic', 'Similar Course', 'Similar Topic'])
for overlap in overlaps:
for sub_lap in overlaps[overlap]:
overlap_frame = overlap_frame.append({
'Course' : overlap[0],
'Topic' : overlap[1],
'Similar Course' : sub_lap[0],
'Similar Topic' : sub_lap[1]
}, ignore_index=True)
table = overlap_frame.to_html()
table_html = HTML(string=table)
table_html.write_pdf('Topic Similarities.pdf')
        topicFrame = pd.DataFrame(columns=['Course', 'Topics'])
import numpy as np
import pytest
from pandas.compat import range, u, zip
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.core.common as com
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
@pytest.fixture
def frame_random_data_integer_multi_index():
levels = [[0, 1], [0, 1, 2]]
codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, codes=codes)
return DataFrame(np.random.randn(6, 2), index=index)
@pytest.fixture
def dataframe_with_duplicate_index():
"""Fixture for DataFrame used in tests for gh-4145 and gh-4146"""
data = [['a', 'd', 'e', 'c', 'f', 'b'],
[1, 4, 5, 3, 6, 2],
[1, 4, 5, 3, 6, 2]]
index = ['h1', 'h3', 'h5']
columns = MultiIndex(
levels=[['A', 'B'], ['A1', 'A2', 'B1', 'B2']],
codes=[[0, 0, 0, 1, 1, 1], [0, 3, 3, 0, 1, 2]],
names=['main', 'sub'])
return DataFrame(data, index=index, columns=columns)
@pytest.mark.parametrize('access_method', [lambda s, x: s[:, x],
lambda s, x: s.loc[:, x],
lambda s, x: s.xs(x, level=1)])
@pytest.mark.parametrize('level1_value, expected', [
(0, Series([1], index=[0])),
(1, Series([2, 3], index=[1, 2]))
])
def test_series_getitem_multiindex(access_method, level1_value, expected):
# GH 6018
# series regression getitem with a multi-index
    s = Series([1, 2, 3])
import pandas as pd
import sparse
import numpy as np
class AnnotationData:
"""
Contains all the segmentation and assignment data
WARNING: self.assignments['Clusternames'] will contain neurite ids (as strings) rather than names
"""
# Todo: if we can preserve segments instead of merging them when two segs are one same neuron, that would help
# (make possible) the classification
# TODO: what happens to features when neurons/segs are reassigned? features go rotten because the segment key is unchanged
def __init__(self, stem_savefile, frame_shape: tuple = (512, 512, 35)): # Todo: is it right to have a default value here?
"""
Initialize the class for segments and assignments
:param stem_savefile: The stem name for the files in which to save assignments and segments
:param frame_shape: the shape of the numpy array of any frame of the video
"""
self._normal_seg_file = stem_savefile + "_segmented.csv"
self._coarse_seg_file = stem_savefile + "_highthresh_segmented.csv"
self.assignment_file = stem_savefile + "_assignment.csv"
try:
self._normal_data_frame = pd.read_csv(self._normal_seg_file)
except FileNotFoundError:
            self._normal_data_frame = pd.DataFrame({"Time": [], "Segment": [], "x": [], "y": [], "z": []}, dtype=int)
# --------------
# import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df = pd.read_json(path, lines=True)
#!/usr/bin/env python
# coding: utf-8
import torch
import numpy as np
from sklearn import metrics
import pandas as pd
import torch.utils.data as Data
import sklearn
from sklearn import tree
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.preprocessing import QuantileTransformer
from xgboost import XGBClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
df = pd.read_csv("./beijing_cate2id.csv")
shape1 = df.shape[0]
baseline_train_df = df.iloc[0:int(0.6 * shape1)]
baseline_val_df = df.iloc[int(0.6 * shape1):int(0.8 * shape1)]
baseline_test_df = df.iloc[int(0.8 * shape1):]
qt = QuantileTransformer(output_distribution="normal").fit(df.loc[:, df.columns != 'scene'])
x_train = baseline_train_df.loc[:, baseline_train_df.columns != 'scene']
a = x_train.columns
x_train = qt.transform(x_train)
x_train = pd.DataFrame(x_train)
x_train.columns = a
y_train = pd.Categorical(baseline_train_df.scene).codes
x_test = baseline_test_df.loc[:, baseline_test_df.columns != 'scene']
a = x_test.columns
x_test = qt.transform(x_test)
x_test = pd.DataFrame(x_test)
import numpy as np
import pandas as pd
from sklearn.decomposition import NMF
class ClusterModel:
@property
def clusters(self):
return len(self._cluster_names)
@property
def cluster_names(self):
return self._cluster_names
@clusters.setter
def clusters(self, value):
self._cluster_names = [f'C_{i}' for i in range(0, value)]
@property
def ratings_matrix(self):
return self._X
@ratings_matrix.setter
def ratings_matrix(self, ratings_dfm):
self._X = ratings_dfm # 'V' in some literature
def build(self):
assert self.ratings_matrix is not None
assert self.cluster_names is not None
model = NMF(n_components=self.clusters, init='random', random_state=0)
self._W = model.fit_transform(self._X) # 'features' matrix
self._H = model.components_ # 'coefficients' matrix
self._err = model.reconstruction_err_ # divergence between W.H and X
return self._err
def reconstruct(self, round_decimals=None):
assert self._X is not None
assert self._W is not None
assert self._H is not None
Xhat = self._W.dot(self._H)
if round_decimals and round_decimals > 0:
Xhat = np.round(Xhat, decimals=round_decimals)
return pd.DataFrame(Xhat, index=self._X.index, columns=self._X.columns)
@property
def subject_cluster_dfm(self):
return pd.DataFrame(self._W, index=self._X.index, columns=self._cluster_names)
@property
def object_cluster_dfm(self):
# Note intentional transport to orient H consistent with W.
        return pd.DataFrame(self._H, index=self._cluster_names, columns=self._X.columns)
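    # Example usage sketch (names and shapes are illustrative assumptions, not from the original):
    # model = ClusterModel()
    # model.clusters = 3                            # defines cluster names C_0, C_1, C_2
    # model.ratings_matrix = ratings_dfm            # non-negative subjects x objects DataFrame
    # err = model.build()                           # fits NMF and returns the reconstruction error
    # approx = model.reconstruct(round_decimals=2)  # W.H re-indexed like the original matrix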
import json
import pytest
import numpy as np
import pandas as pd
import scipy.spatial.distance as scipy_distance
from whatlies import Embedding, EmbeddingSet
from .common import validate_plot_general_properties
"""
*Guide*
Here are the plot's properties which could be checked (some of them may not be applicable
for a particular plot/test case):
- type: the type of plot; usually it's scatter plot with circle marks.
- data_field: the name of the field of chart data which is used for datapoints' coordinates.
- data: the position (i.e. coordinates) of datapoints in the plot.
- x_label: label of x-axis.
- y_label: label of y-axis.
- title: title of the plot.
- label_field: the name of the field of chart data which is used for annotating data points with text labels.
- label: the text labels used for annotation of datapoints.
- color_field: the name of the field of chart data which is used for coloring datapoints.
"""
@pytest.fixture
def embset():
names = ["red", "blue", "green", "yellow", "white"]
vectors = np.random.rand(5, 4) * 10 - 5
embeddings = [Embedding(name, vector) for name, vector in zip(names, vectors)]
return EmbeddingSet(*embeddings)
def test_default(embset):
p = embset.plot_interactive()
chart = json.loads(p.to_json())
props = {
"type": "circle",
"data_field": ["x_axis", "y_axis"],
"data": embset.to_X()[:, :2],
"x_label": "Dimension 0",
"y_label": "Dimension 1",
"title": "Dimension 0 vs. Dimension 1",
"label_field": "original",
"label": [v.orig for v in embset.embeddings.values()],
"color_field": "",
}
chart_data = pd.DataFrame(chart["datasets"][chart["data"]["name"]])
assert [
chart["layer"][0]["encoding"]["x"]["field"],
chart["layer"][0]["encoding"]["y"]["field"],
] == props["data_field"]
assert np.array_equal(chart_data[["x_axis", "y_axis"]].values, props["data"])
assert chart["layer"][0]["encoding"]["color"]["field"] == props["color_field"]
assert chart["layer"][1]["encoding"]["text"]["field"] == props["label_field"]
assert np.array_equal(chart_data["original"].values, props["label"])
validate_plot_general_properties(chart["layer"][0], props)
# Check if it's an interactive plot (done only in this test)
assert "selection" in chart["layer"][0]
# Check tooltip data (only done in this test case)
tooltip_fields = set(
[
chart["layer"][0]["encoding"]["tooltip"][0]["field"],
chart["layer"][0]["encoding"]["tooltip"][1]["field"],
]
)
assert tooltip_fields == set(["name", "original"])
def test_int_axis(embset):
p = embset.plot_interactive(x_axis=2, y_axis=0, x_label="xaxis", title="some chart")
chart = json.loads(p.to_json())
props = {
"type": "circle",
"data_field": ["x_axis", "y_axis"],
"data": np.concatenate([embset.to_X()[:, 2:3], embset.to_X()[:, :1]], axis=-1),
"x_label": "xaxis",
"y_label": "Dimension 0",
"title": "some chart",
"label_field": "original",
"label": [v.orig for v in embset.embeddings.values()],
"color_field": "",
}
chart_data = pd.DataFrame(chart["datasets"][chart["data"]["name"]])
assert [
chart["layer"][0]["encoding"]["x"]["field"],
chart["layer"][0]["encoding"]["y"]["field"],
] == props["data_field"]
assert np.array_equal(chart_data[["x_axis", "y_axis"]].values, props["data"])
assert chart["layer"][0]["encoding"]["color"]["field"] == props["color_field"]
assert chart["layer"][1]["encoding"]["text"]["field"] == props["label_field"]
assert np.array_equal(chart_data["original"].values, props["label"])
validate_plot_general_properties(chart["layer"][0], props)
def test_int_axis_with_common_str_axis_metric(embset):
p = embset.plot_interactive(x_axis=1, y_axis=2, axis_metric="cosine_similarity")
chart = json.loads(p.to_json())
props = {
"type": "circle",
"data_field": ["x_axis", "y_axis"],
"data": embset.to_X()[:, 1:3],
"x_label": "Dimension 1",
"y_label": "Dimension 2",
"title": "Dimension 1 vs. Dimension 2",
"label_field": "original",
"label": [v.orig for v in embset.embeddings.values()],
"color_field": "",
}
chart_data = pd.DataFrame(chart["datasets"][chart["data"]["name"]])
assert [
chart["layer"][0]["encoding"]["x"]["field"],
chart["layer"][0]["encoding"]["y"]["field"],
] == props["data_field"]
assert np.array_equal(chart_data[["x_axis", "y_axis"]].values, props["data"])
assert chart["layer"][0]["encoding"]["color"]["field"] == props["color_field"]
assert chart["layer"][1]["encoding"]["text"]["field"] == props["label_field"]
assert np.array_equal(chart_data["original"].values, props["label"])
validate_plot_general_properties(chart["layer"][0], props)
def test_str_axis(embset):
p = embset.plot_interactive(x_axis="red", y_axis="blue")
chart = json.loads(p.to_json())
vectors = []
for e in embset.embeddings.values():
vectors.append([e > embset["red"], e > embset["blue"]])
vectors = np.array(vectors)
props = {
"type": "circle",
"data_field": ["x_axis", "y_axis"],
"data": vectors,
"x_label": "red",
"y_label": "blue",
"title": "red vs. blue",
"label_field": "original",
"label": [v.orig for v in embset.embeddings.values()],
"color_field": "",
}
chart_data = pd.DataFrame(chart["datasets"][chart["data"]["name"]])
assert [
chart["layer"][0]["encoding"]["x"]["field"],
chart["layer"][0]["encoding"]["y"]["field"],
] == props["data_field"]
assert np.array_equal(chart_data[["x_axis", "y_axis"]].values, props["data"])
assert chart["layer"][0]["encoding"]["color"]["field"] == props["color_field"]
assert chart["layer"][1]["encoding"]["text"]["field"] == props["label_field"]
assert np.array_equal(chart_data["original"].values, props["label"])
validate_plot_general_properties(chart["layer"][0], props)
def test_str_axis_with_common_str_axis_metric(embset):
p = embset.plot_interactive(
x_axis="red",
y_axis="blue",
y_label="blue_cosine",
axis_metric="cosine_distance",
color="name",
)
chart = json.loads(p.to_json())
vectors = []
for e in embset.embeddings.values():
vectors.append(
[
scipy_distance.cosine(e.vector, embset["red"].vector),
scipy_distance.cosine(e.vector, embset["blue"].vector),
]
)
vectors = np.array(vectors)
props = {
"type": "circle",
"data_field": ["x_axis", "y_axis"],
"data": vectors,
"x_label": "red",
"y_label": "blue_cosine",
"title": "red vs. blue",
"label_field": "original",
"label": [v.orig for v in embset.embeddings.values()],
"color_field": "name",
}
chart_data = pd.DataFrame(chart["datasets"][chart["data"]["name"]])
assert [
chart["layer"][0]["encoding"]["x"]["field"],
chart["layer"][0]["encoding"]["y"]["field"],
] == props["data_field"]
assert np.array_equal(chart_data[["x_axis", "y_axis"]].values, props["data"])
assert chart["layer"][0]["encoding"]["color"]["field"] == props["color_field"]
assert chart["layer"][1]["encoding"]["text"]["field"] == props["label_field"]
assert np.array_equal(chart_data["original"].values, props["label"])
validate_plot_general_properties(chart["layer"][0], props)
def test_str_axis_with_different_axis_metric(embset):
p = embset.plot_interactive(
x_axis="red", y_axis="blue", axis_metric=[np.dot, "euclidean"]
)
chart = json.loads(p.to_json())
vectors = []
for e in embset.embeddings.values():
vectors.append(
[
np.dot(e.vector, embset["red"].vector),
scipy_distance.euclidean(e.vector, embset["blue"].vector),
]
)
vectors = np.array(vectors)
props = {
"type": "circle",
"data_field": ["x_axis", "y_axis"],
"data": vectors,
"x_label": "red",
"y_label": "blue",
"title": "red vs. blue",
"label_field": "original",
"label": [v.orig for v in embset.embeddings.values()],
"color_field": "",
}
chart_data = pd.DataFrame(chart["datasets"][chart["data"]["name"]])
assert [
chart["layer"][0]["encoding"]["x"]["field"],
chart["layer"][0]["encoding"]["y"]["field"],
] == props["data_field"]
assert np.array_equal(chart_data[["x_axis", "y_axis"]].values, props["data"])
assert chart["layer"][0]["encoding"]["color"]["field"] == props["color_field"]
assert chart["layer"][1]["encoding"]["text"]["field"] == props["label_field"]
assert np.array_equal(chart_data["original"].values, props["label"])
validate_plot_general_properties(chart["layer"][0], props)
def test_emb_axis(embset):
p = embset.plot_interactive(x_axis=embset["yellow"], y_axis=embset["white"])
chart = json.loads(p.to_json())
vectors = []
for e in embset.embeddings.values():
vectors.append([e > embset["yellow"], e > embset["white"]])
vectors = np.array(vectors)
props = {
"type": "circle",
"data_field": ["x_axis", "y_axis"],
"data": vectors,
"x_label": "yellow",
"y_label": "white",
"title": "yellow vs. white",
"label_field": "original",
"label": [v.orig for v in embset.embeddings.values()],
"color_field": "",
}
chart_data = pd.DataFrame(chart["datasets"][chart["data"]["name"]])
assert [
chart["layer"][0]["encoding"]["x"]["field"],
chart["layer"][0]["encoding"]["y"]["field"],
] == props["data_field"]
assert np.array_equal(chart_data[["x_axis", "y_axis"]].values, props["data"])
assert chart["layer"][0]["encoding"]["color"]["field"] == props["color_field"]
assert chart["layer"][1]["encoding"]["text"]["field"] == props["label_field"]
assert np.array_equal(chart_data["original"].values, props["label"])
validate_plot_general_properties(chart["layer"][0], props)
def test_emb_axis_with_common_str_axis_metric(embset):
p = embset.plot_interactive(
x_axis=embset["red"],
y_axis=embset["green"],
axis_metric="cosine_similarity",
annot=False,
)
chart = json.loads(p.to_json())
vectors = []
for e in embset.embeddings.values():
vectors.append(
[
1 - scipy_distance.cosine(e.vector, embset["red"].vector),
1 - scipy_distance.cosine(e.vector, embset["green"].vector),
]
)
vectors = np.array(vectors)
props = {
"type": "circle",
"data_field": ["x_axis", "y_axis"],
"data": vectors,
"x_label": "red",
"y_label": "green",
"title": "red vs. green",
"color_field": "",
# Not applicable: label_field, label
}
chart_data = | pd.DataFrame(chart["datasets"][chart["data"]["name"]]) | pandas.DataFrame |
# coding: utf-8
# In[1]:
import pandas as pd
import findspark
findspark.init('spark24')
from pyspark.sql import SparkSession
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
reviews = pd.read_csv("/home/yashika/Downloads/zomato.csv")
reviews.head(3)
# In[3]:
#pd.show_versions()
#reviews.value_counts()
# In[4]:
reviews['location'].value_counts().head(10).plot.bar()
# In[5]:
len(reviews)
# In[6]:
#reviews['votes']
reviews['votes'].value_counts().head(10).sort_index().plot.bar()
# In[7]:
df = pd.DataFrame(np.random.rand(10,4))
# In[8]:
df.cumsum()
df.plot()
# In[9]:
#pd.Series(np.random.rand(10))
ts = pd.Series(np.random.rand(100))
# In[10]:
#ts.cumsum()
# In[11]:
plt.figure(figsize=(10,5))
ts.plot()
#plt.plot()
# In[12]:
len(df)
# In[13]:
list(range(len(df)))
# In[14]:
df[0] = pd.Series()
# In[16]:
#df.plot(0)
# In[19]:
df3 = pd.DataFrame(np.random.randn(10, 2))
# In[38]:
df3.plot()
#columns=['B', 'C']
df3['A'] = pd.Series(list(range(len(df))))
#df3['A']=pd.Series(len(df))
# In[39]:
df3.plot()
# In[40]:
df3.plot()
# In[41]:
df3['A']
# In[43]:
df.iloc[5]
# In[44]:
df = pd.DataFrame(np.random.rand(10,4))
# In[45]:
# In[46]:
df
# In[50]:
df.iloc[4].plot(kind='bar')
# In[54]:
plt.axhline(3, color='red');
# In[55]:
df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
# In[56]:
df2
# In[57]:
df2.plot()
# In[59]:
df2.plot.bar(stacked=True);
# In[60]:
df2.plot.hist(stacked=True);
# In[63]:
data = { 'Company' :['Hello','heyy','Good','bad'], 'Person' :['Parsang','Yashika','Payal','Parsika'] ,'Sales' :[200,300,100,500]}
# In[64]:
df4 = pd.DataFrame(data)
#!/usr/bin/python
# _____________________________________________________________________________
# ----------------
# import libraries
# ----------------
# standard libraries
# -----
import torch
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
import json
# utilities
# -----
# custom functions
# -----
def show_batch(sample_batched):
"""
sample_batched: Tuple[torch.tensor, torch.tensor] -> None
show_batch takes a contrastive sample sample_batched and plots
an overview of the batch
"""
grid_border_size = 2
nrow = 10
batch_1 = sample_batched[0][0][:, 0:, :, :]
batch_2 = sample_batched[0][1][:, 0:, :, :]
difference = np.abs(batch_1 - batch_2)
titles = ["first contrast", "second contrast", "difference"]
fig, axes = plt.subplots(1, 3, figsize=(2 * 6.4, 4.8))
for (i, batch) in enumerate([batch_1, batch_2, difference]):
ax = axes[i]
grid = utils.make_grid(batch, nrow=nrow, padding=grid_border_size)
ax.imshow(grid.numpy().transpose((1, 2, 0)))
ax.set_title(titles[i])
ax.axis("off")
plt.show()
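# Example usage sketch (assumes a concrete CLTTDataset subclass `dataset` and the torch DataLoader
# imported above; the batch size is an arbitrary choice):
# loader = DataLoader(dataset, batch_size=20, shuffle=False)
# sample_batched = next(iter(loader))  # ([images, augmentations], labels) in contrastive mode
# show_batch(sample_batched)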
# ----------------
# custom classes
# ----------------
# custom CLTT dataset superclass (abstract)
# -----
class CLTTDataset(Dataset):
"""
CLTTDataset is an abstract class implementing all the necessary methods
to sample data according to the CLTT approach. CLTTDataset itself
should not be instantiated as a standalone class, but should be
    inherited from, with the abstract methods overridden
"""
def __init__(self, root, train=True, transform=None, target_transform=None,
n_fix=5, contrastive=True, sampling_mode='uniform', shuffle_object_order=True, circular_sampling=True, buffer_size=12096):
"""
        __init__ initializes the CLTTDataset class; it defines class-wide
        constants and builds the registry of files and the data buffer
root:str path to the dataset directory
train:bool training set instead of testing
transform:torchvision.transform
target_transform:torchvision.transform
n_fix:int for deterministic n_fix, float for probabilistic
contrastive:bool contrastive dataset mode
sampling_mode:str how the buffer gets built
circular_sampling:bool make the first object the last object
buffer_size:int approximate buffersize
"""
super().__init__()
self.train = train
self.sampling_mode = sampling_mode
self.shuffle_object_order = shuffle_object_order
self.buffer_size = buffer_size
self.n_fix = n_fix
self.tau_plus = 1
self.tau_minus = 0 # contrasts from the past (experimental)
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.contrastive = contrastive
self.circular_sampling = circular_sampling
self.get_dataset_properties()
self.registry = self.build_registry(train)
if self.contrastive:
self.buffer = self.build_buffer(self.registry, self.sampling_mode, self.n_fix, self.shuffle_object_order, approx_size=self.buffer_size)
else:
# if used in non-contrastive mode the sampler just samples from all data
self.buffer = self.registry
pass
def __len__(self):
"""
__len__ defines the length of the dataset and indirectly
defines how many samples can be drawn from the dataset
in one epoch
"""
length = len(self.buffer)
return length
def get_dataset_properties(self):
"""
get_dataset_properties has to be defined for each dataset
it stores number of objects, number of classes, a list of
strings with labels
"""
# basic properties (need to be there)
self.n_objects = 3 # number of different objects >= n_classes
self.n_classes = 3 # number of different classes
self.labels = [
"A",
"B",
"C",
]
self.n_views_per_object = 10 # how many overall views of each object
self.subdirectory = '/dataset_name/' # where is the dataset
self.name = 'dataset name' # name of the dataset
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
self.custom_property = ['one', 'two', 'three']
raise Exception("Calling abstract method, please inherit \
from the CLTTDataset class and reimplement this method") # pseudoclass
pass
def __getitem__(self, idx):
"""
__getitem__ is a method that defines how one sample of the
dataset is drawn
"""
if self.contrastive:
image, label = self.get_single_item(idx)
augmentation, _ = self.sample_contrast(idx)
if self.transform:
image, augmentation = self.transform(
image), self.transform(augmentation)
if self.target_transform:
label = self.target_transform(label)
output = ([image, augmentation], label)
else:
image, label = self.get_single_item(idx)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
output = image, label
return output
def sample_contrast(self, chosen_index):
"""
given index chosen_index, sample a corresponding contrast close in time
"""
chosen_time = self.buffer.iloc[chosen_index]["time_idx"]
possible_indices = self.buffer[
(self.buffer["time_idx"].between(chosen_time - self.tau_minus, chosen_time + self.tau_plus)) & (
self.buffer["time_idx"] != chosen_time)].index
# sampling at the end of the buffer
if (chosen_time + self.tau_plus) > self.buffer.time_idx.max():
if self.circular_sampling:
also_possible = self.buffer[
(self.buffer["time_idx"].between(self.buffer.time_idx.min(), (
chosen_time + self.tau_plus - 1) - self.buffer.time_idx.max())) & (
self.buffer["time_idx"] != chosen_time)].index
else:
also_possible = self.buffer[self.buffer["time_idx"] == chosen_time].index
possible_indices = possible_indices.union(also_possible)
# sampling at the beginning of the buffer
if (chosen_time - self.tau_minus) < self.buffer.time_idx.min():
if self.circular_sampling:
also_possible = self.buffer[
(self.buffer["time_idx"].between(self.buffer.time_idx.max() + (chosen_time - self.tau_minus) + 1,
self.buffer.time_idx.max())) & (
self.buffer["time_idx"] != chosen_time)].index
else:
also_possible = self.buffer[self.buffer["time_idx"] == chosen_time].index
possible_indices = possible_indices.union(also_possible)
chosen_index = np.random.choice(possible_indices)
return self.get_single_item(chosen_index)
def get_single_item(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
elif isinstance(idx, pd.core.indexes.numeric.Int64Index):
idx = idx[0]
path_to_file = self.buffer.loc[idx, "path_to_file"]
if isinstance(path_to_file, pd.core.series.Series):
path_to_file = path_to_file.item()
image = Image.open(path_to_file)
obj_info = self.buffer.iloc[idx, 1:].to_dict()
label = self.buffer.loc[idx, "label"]
return image, label
def build_registry(self, train):
"""
build a registry of all image files
"""
path_list = []
object_list = []
label_list = []
time_list = []
d = self.root + self.subdirectory + 'train/' if train else self.root + self.subdirectory + 'test/'
# have an ordered list
list_of_files = os.listdir(d)
list_of_files.sort()
for timestep, path in enumerate(list_of_files):
full_path = os.path.join(d, path)
if os.path.isfile(full_path):
path_list.append(full_path)
object_list.append(timestep // self.n_views_per_object)
label_list.append(timestep // self.n_views_per_object)
time_list.append(timestep % self.n_views_per_object)
tempdict = {'path_to_file': path_list, 'label': label_list, 'object_nr': object_list, 'time_idx': time_list}
dataframe = pd.DataFrame(tempdict)
dataframe.sort_values(by=['object_nr', 'time_idx'], inplace=True)
dataframe.reset_index(drop=True, inplace=True)
return dataframe
def build_buffer(self, registry, sampling_mode, n_fix, shuffle_object_order, approx_size):
"""
build_buffer builds a buffer from all data that is available
according to the sampling mode specified. Default method just
returns the whole registry
"""
# if n_fix is a probability, then get an expected value of the number of views
expected_views = n_fix if n_fix >= 1 else self.expected_n(n_fix)
object_order = np.arange(self.n_objects)
if shuffle_object_order:
np.random.shuffle(object_order)
if sampling_mode == 'window':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
chosen_index = np.random.choice(np.arange(0, self.n_views_per_object - n_views))
streambits.append(registry[registry.object_nr == o][
registry.time_idx.between(chosen_index, chosen_index + n_views - 1)])
if shuffle_object_order:
np.random.shuffle(object_order)
timestream = pd.concat(streambits, ignore_index=True)
timestream.time_idx = np.arange(len(timestream.time_idx))
elif sampling_mode == 'uniform':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
chosen_indexs = np.random.choice(np.arange(0, self.n_views_per_object), n_views)
streambits.append(registry[registry.object_nr == o].iloc[chosen_indexs])
if shuffle_object_order:
np.random.shuffle(object_order)
timestream = pd.concat(streambits, ignore_index=True)
timestream.time_idx = np.arange(len(timestream.time_idx))
elif sampling_mode == 'randomwalk':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
streambits.append(registry.iloc[self.get_N_randomwalk_steps(n_views, o)])
timestream = pd.concat(streambits, ignore_index=True)
timestream.time_idx = np.arange(len(timestream.time_idx))
else:
print("[INFO] Warning, no sampling mode specified, defaulting to \
whole dataset")
timestream = registry #if no mode, then return the whole registry
return timestream
def refresh_buffer(self):
"""
refresh buffer takes an CLTTDataset class and refreshes its own buffer
given the registry
"""
self.buffer = self.build_buffer(self.registry, self.sampling_mode, self.n_fix, self.shuffle_object_order, self.buffer_size)
pass
def get_N_randomwalk_steps(self, N, object_nr):
"""
Get index values of N random walk steps of a object specified by "object_nr".
"""
raise Exception("Calling abstract method, please inherit \
from the CLTTDataset class and reimplement this method") # pseudoclass
pass
def expected_n(self, probability):
"""
expected_n takes a float probability between 0 and 1
and returns the expected value of the number of fixations
"""
result = (1-probability)*(probability)/(1-(probability))**2 + 1
return result
def get_n(self, input):
"""
get_n takes a float probability input between 0 and 1
and returns n fixations according to probability
if input >= 1 it just returns its argument
"""
if input >= 1:
return input
else:
result = 1 # make sure that you switch to the next object once
while input > np.random.random():
result += 1
return result
# datasets (CLTTDataset subclasses)
# -----
# TODO: Rewrite MiyashitaDataset to be compatible with probabilistic n_fix
class MiyashitaDataset(CLTTDataset):
"""
    MiyashitaDataset is a dataset inspired by the work of
    Miyashita, 1988. It comprises a set of different fractal patterns
    that are presented in a specific order so that they can be associated.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
def get_dataset_properties(self):
# basic properties (need to be there)
self.n_objects = 100 # number of different objects >= n_classes
self.n_classes = 100 # number of different classes
self.labels = [str(i) for i in range(self.n_classes)]
self.n_views_per_object = 100 if (self.train and self.contrastive) else 1
#self.n_fix # how many overall views of each object
self.subdirectory = '/fractals100_64x64/' # where is the dataset
self.name = 'Miyashita Fractals' # name of the dataset
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
# for Miyashita every mode is the same
# that means we do not need to reimplement get_n_randomwalk_steps
# and can just fix the sampling mode
self.sampling_mode = "uniform" if (self.train and self.contrastive) else "" # overwrite sampling mode
self.basic_transform = transforms.RandomAffine(
degrees=(-10, 10),
translate=(0.15, 0.15),
scale=(0.9, 1.0))
        # add the basic transform to the regular transform for training
if (self.train and self.contrastive):
self.transform = transforms.Compose([
self.basic_transform,
self.transform,
])
pass
def build_registry(self, train):
"""
Reimplementation of the build_registry method, because Miyashita
Fractals have no testset and the in-class variability is generated
virtually instead of having multiple pictures
"""
path_list = []
object_list = []
label_list = []
time_list = []
e = 0
d = self.root + self.subdirectory # there is no fractals testset
# have an ordered list
list_of_files = os.listdir(d)
list_of_files.sort()
for o, path in enumerate(list_of_files):
full_path = os.path.join(d, path)
if os.path.isfile(full_path):
repetitions = self.n_views_per_object
# repeat the same picture n_fix times
for timestep in range(repetitions):
path_list.append(full_path)
time_list.append(timestep + e * self.n_views_per_object)
object_list.append(o)
label_list.append(o)
e += 1
temporary_dict = {'path_to_file': path_list,
'label': label_list,
'object_nr': object_list,
'time_idx': time_list}
dataframe = pd.DataFrame(temporary_dict)
dataframe.sort_values(by=['object_nr', 'time_idx'], inplace=True)
dataframe.reset_index(drop=True, inplace=True)
return dataframe
class TDWDataset(CLTTDataset):
"""
    The ThreeDWorld Dataset by <NAME> comprises
    1008 views around 12 distinct objects rendered
    in the TDW environment.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
def get_dataset_properties(self):
# basic properties (need to be there)
self.n_objects = 12 # number of different objects >= n_classes
self.n_classes = 12 # number of different classes
self.labels = [
"cup",
"comb",
"scissor",
"hammer",
"book",
"calculator",
"goblet",
"candle",
"headphones",
"screwdriver",
"cassette",
"bottle",
]
delta_phi = 10
self.phis = np.arange(0, 360, delta_phi)
delta_theta = 10
self.thetas = np.arange(10, 80, delta_theta)
delta_r = 0.1
self.rs = np.arange(0.3, 0.7, delta_r)
self.n_views_per_object = len(self.phis) * len(self.thetas) * len(self.rs) # how many overall views of each object
self.subdirectory = '/spherical_photoreal_64x64_DoF/' # where is the dataset
self.name = 'ThreeDWorld Objects' # name of the dataset
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
pass
def get_N_randomwalk_steps(self, N, object_nr):
"""
Get index values of N random walk steps of a object specified by "object_nr".
"""
def get_registry_index(r, theta, phi):
"""
helper function to get index given a coordinate tuple,
i.e. r, theta and phi value
"""
ix = r * (len(self.thetas) * len(self.phis)) + theta * len(self.phis) + phi
return ix
index = []
# Possible values for r,theta and phi
r = len(self.rs)
theta = len(self.thetas)
phi = len(self.phis)
# select random start values for r,theta and phi
current_r = np.random.randint(0, r - 1)
current_theta = np.random.randint(0, theta - 1)
current_phi = np.random.randint(0, phi - 1)
for i in range(N):
while True:
                # 6 possible directions in which to go from the current position
                # Possible steps: +/-r, +/-theta, +/-phi
                rand = np.random.randint(low=0, high=6)
                # For the chosen direction, check whether the step stays inside the grid
if (rand == 0) & (current_r < r - 1):
current_r += 1
break
if (rand == 1) & (current_r > 0):
current_r -= 1
break
if (rand == 2) & (current_theta < theta - 1):
current_theta += 1
break
if (rand == 3) & (current_theta > 0):
current_theta -= 1
break
if (rand == 4) & (current_phi < phi - 1):
current_phi += 1
break
if (rand == 5) & (current_phi > 0):
current_phi -= 1
break
# transform r,theta, phi values
# into index number between 0 and 1008
ix = get_registry_index(
current_r, current_theta, current_phi)
index.append(ix)
index = np.array(index)
# to get index values for object "object_nr", the values are shifted
index += self.n_views_per_object * object_nr
return index
def additional_metadata(self):
# hacky way to get some metadata, to be revised
phi_angle_list = []
theta_angle_list = []
radius_list = []
for o in range(self.n_classes):
for r in self.rs:
for theta in self.thetas:
for phi in self.phis:
phi_angle_list.append(phi)
theta_angle_list.append(theta)
radius_list.append(r)
tempdict = {'phi': phi_angle_list, 'theta': theta_angle_list, 'radius': radius_list}
dataframe = pd.DataFrame(tempdict)
self.registry= pd.merge(self.registry, dataframe, left_index=True, right_index=True)
pass
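# usage sketch (illustrative)
# -----
# A minimal sketch of how a CLTTDataset subclass could be consumed with a
# standard DataLoader and visualized via show_batch above. The root path and
# batch size are placeholder assumptions; the TDW data must already be
# available under root + '/spherical_photoreal_64x64_DoF/'.
def _example_tdw_loader(root="./data", batch_size=100):
    """Return a contrastive DataLoader over the TDWDataset (illustrative)."""
    dataset = TDWDataset(
        root=root,
        train=True,
        transform=transforms.ToTensor(),
        n_fix=5,
        contrastive=True,
        sampling_mode="randomwalk",
    )
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    # e.g. visualize the first contrastive batch:
    # for batch in loader:
    #     show_batch(batch)
    #     break
    return loader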
class COIL100Dataset(CLTTDataset):
"""
    COIL100Dataset is a dataset based on the work of Sameer, Shree, and Hiroshi, 1996.
    It comprises color images of 100 objects, with 72 views of each object.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
def get_dataset_properties(self):
# basic properties (need to be there)
self.n_objects = 100 # number of different objects >= n_classes
self.n_classes = 100 # number of different classes
self.labels = [str(i) for i in range(self.n_classes)]
if self.train:
self.n_views_per_object = 54 # number of overall views of each object on trainset
else:
self.n_views_per_object = 18 # number of overall views of each object on testset
self.subdirectory = '/coil100_128x128/' # where is the dataset
self.name = 'Columbia University Image Library' # name of the dataset
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
pass
def get_N_randomwalk_steps(self, N, object_nr):
"""
Get index values of N random walk steps of a object specified by "object_nr".
"""
index = []
current_idx = np.random.randint(0, self.n_views_per_object - 1)
for i in range(N):
while True:
                # 2 possible directions in which to go from the current position
                # Possible steps: +: left, -: right
rand = np.random.randint(low=0, high=2)
if (rand == 0) & (current_idx > 0):
current_idx -= 1
break
if (rand == 1) & (current_idx < self.n_views_per_object - 1):
current_idx += 1
break
index.append(current_idx)
index = np.array(index)
index += self.n_views_per_object * object_nr
return index
def build_registry(self, train):
"""
build a registry of all image files
"""
path_list = []
object_list = []
label_list = []
time_list = []
# d = self.root + self.subdirectory
d = self.root + self.subdirectory + 'train/' if train else self.root + self.subdirectory + 'test/'
# have an ordered list
list_of_files = os.listdir(d)
list_of_files.sort()
for timestep, path in enumerate(list_of_files):
full_path = os.path.join(d, path)
if os.path.isfile(full_path):
path_list.append(full_path)
object_list.append(timestep // self.n_views_per_object)
label_list.append(timestep // self.n_views_per_object)
time_list.append(timestep % self.n_views_per_object)
tempdict = {'path_to_file': path_list, 'label': label_list, 'object_nr': object_list, 'time_idx': time_list}
dataframe = pd.DataFrame(tempdict)
dataframe.sort_values(by=['object_nr', 'time_idx'], inplace=True)
dataframe.reset_index(drop=True, inplace=True)
return dataframe
class RoadDefectsDataset(CLTTDataset):
"""
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
def get_dataset_properties(self):
# basic properties (need to be there)
self.n_objects = 12 # number of different objects >= n_classes
self.n_classes = 12 # number of different classes
self.labels = ['Affaissement de rive',
'Affaissement hors rive',
'Arrachement',
'Autres réparations',
'Faïençage',
'Fissure longitudinale',
'Fissure transversale',
'Glaçage - Ressuage',
'Réparation en BB sur découpe',
'Fissure thermique',
'Orniérage',
'Background'
]
self.subdirectory = '/home/finn/DATASET/CD33/' # where is the dataset
self.name = 'Road defects' # name of the dataset
self.label_file_train = 'data/defects_train.json'
self.label_file_test = 'data/defects_val_small.json'
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
pass
def get_N_randomwalk_steps(self, N, object_nr, n_views_per_object):
"""
Get index values of N random walk steps of a object specified by "object_nr".
"""
index = []
current_idx = np.random.randint(0, n_views_per_object - 1)
for i in range(N):
while True:
                # 2 possible directions in which to go from the current position
                # Possible steps: +: left, -: right
rand = np.random.randint(low=0, high=2)
if (rand == 0) & (current_idx > 0):
current_idx -= 1
break
if (rand == 1) & (current_idx < n_views_per_object - 1):
current_idx += 1
break
index.append(current_idx)
index = np.array(index)
index += n_views_per_object * object_nr
return index
def build_registry(self, train):
"""
build a registry of all image files
"""
path_list = []
object_list = []
label_list = []
time_list = []
# d = self.root + self.subdirectory
# d = self.subdirectory + 'train/' if train else self.subdirectory + 'test/'
d = self.subdirectory
#load labels
if self.train:
with open(self.label_file_train, 'r') as f_in:
labels = json.load(f_in)
else:
with open(self.label_file_test, 'r') as f_in:
labels = json.load(f_in)
# dict to count instances of each class/object
time_dict = {l:0 for l in self.labels}
for file_name, l_list in labels.items():
full_path = os.path.join(d, file_name)
if os.path.isfile(full_path):
for label in set(l_list):
path_list.append(full_path)
object_list.append(self.labels.index(label))
label_list.append(self.labels.index(label))
time_list.append(time_dict[label])
time_dict[label] += 1
tempdict = {'path_to_file': path_list, 'label': label_list, 'object_nr': object_list, 'time_idx': time_list}
dataframe = pd.DataFrame(tempdict)
dataframe.sort_values(by=['object_nr', 'time_idx'], inplace=True)
dataframe.reset_index(drop=True, inplace=True)
return dataframe
def build_buffer(self, registry, sampling_mode, n_fix, shuffle_object_order, approx_size):
"""
build_buffer builds a buffer from all data that is available
according to the sampling mode specified. Default method just
returns the whole registry
"""
# if n_fix is a probability, then get an expected value of the number of views
expected_views = n_fix if n_fix >= 1 else self.expected_n(n_fix)
object_order = np.arange(self.n_objects)
if shuffle_object_order:
np.random.shuffle(object_order)
if sampling_mode == 'window':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
chosen_index = np.random.choice(np.arange(0, registry.object_nr.value_counts()[o] - n_views))
streambits.append(registry[registry.object_nr == o][
registry.time_idx.between(chosen_index, chosen_index + n_views - 1)])
if shuffle_object_order:
np.random.shuffle(object_order)
timestream = pd.concat(streambits, ignore_index=True)
timestream.time_idx = np.arange(len(timestream.time_idx))
elif sampling_mode == 'uniform':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
chosen_indexs = np.random.choice(np.arange(0, registry.object_nr.value_counts()[o]), n_views)
streambits.append(registry[registry.object_nr == o].iloc[chosen_indexs])
if shuffle_object_order:
np.random.shuffle(object_order)
timestream = pd.concat(streambits, ignore_index=True)
timestream.time_idx = np.arange(len(timestream.time_idx))
elif sampling_mode == 'randomwalk':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
streambits.append(registry.iloc[self.get_N_randomwalk_steps(n_views, o, registry.object_nr.value_counts()[o])])
            timestream = pd.concat(streambits, ignore_index=True)
            timestream.time_idx = np.arange(len(timestream.time_idx))
        else:
            print("[INFO] Warning, no sampling mode specified, defaulting to \
                whole dataset")
            timestream = registry  # if no mode, then return the whole registry
        return timestream
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
Load the datasets and merged them to generate a dataframe
to be used for analysis
Args:
messages_filepath: The path of messages dataset.
categories_filepath: The path of categories dataset.
Returns:
A merged dataset
'''
messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
    # merge on a common identifier column; a shared 'id' column is assumed here
    df = messages.merge(categories, on='id')
    return df
import warnings
import yfinance as yf
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import seaborn as sns
import matplotlib as mpl
from matplotlib import pyplot as plt
from datetime import datetime, date
from yahooquery import Ticker
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
from tensorflow import keras
import tensorflow as tf
import streamlit as st
from scipy.stats import spearmanr
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
warnings.filterwarnings("ignore")
np.random.seed(42)
mpl.use("Agg")
plt.style.use(["seaborn-darkgrid", "seaborn-poster"])
plt.rcParams["figure.figsize"] = [15, 8]
plt.rcParams["figure.dpi"] = 150
keras = tf.compat.v1.keras
Sequence = keras.utils.Sequence
today_stamp = str(datetime.now())[:10]
results_path = Path(f"data/variates/univariate/{today_stamp}/")
if not results_path.exists():
results_path.mkdir(parents=True)
def company_longName(symbol):
d = Ticker(symbol).quote_type
return list(d.values())[0]["longName"]
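# e.g. company_longName("AAPL") would be expected to return "Apple Inc."
# (illustrative; the exact string depends on the Yahoo Finance response)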
class The_Univariate_TS_Reg(object):
def __init__(self, stock_symbol):
self.ticker = stock_symbol
self.saver = company_longName(self.ticker)
def runs(self):
sp500 = yf.download(self.ticker, period="5y", interval="1d")
sp500 = pd.DataFrame(sp500["Adj Close"])
sp500.columns = [self.saver]
sp500.fillna(0.0, inplace=True)
scaler = MinMaxScaler()
sp500_scaled = pd.Series(scaler.fit_transform(sp500).squeeze(), index=sp500.index)
sp500_scaled.describe()
def create_univariate_rnn_data(data, window_size):
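            # build a lagged design matrix: each row of X holds window_size
            # consecutive observations and y is the value that follows them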
n = len(data)
y = data[window_size:]
data = data.values.reshape(-1, 1) # make 2D
X = np.hstack(
tuple(
[
data[i : n - j, :]
for i, j in enumerate(range(window_size, 0, -1))
]
)
)
            return pd.DataFrame(X, index=y.index)
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=("a", "m", "p", "x"),
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis("foo")
result = float_frame.copy()
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis("bar", axis=1)
result = float_frame.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_raises(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis({0: 10, 1: 20}, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=1)
with pytest.raises(ValueError, match="Use `.rename`"):
df["A"].rename_axis(id)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
df = DataFrame(
{"x": [i for i in range(len(mi))], "y": [i * 10 for i in range(len(mi))]},
index=mi,
)
# Test for rename of the Index object of columns
result = df.rename_axis("cols", axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={"cols": "new"}, axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
result = sdf.rename_axis(index="foo", columns="meh")
assert result.index.name == "foo"
assert result.columns.name == "meh"
# Test different error cases
with pytest.raises(TypeError, match="Must pass"):
df.rename_axis(index="wrong")
with pytest.raises(ValueError, match="Length of names"):
df.rename_axis(index=["wrong"])
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
@pytest.mark.parametrize(
"kwargs, rename_index, rename_columns",
[
({"mapper": None, "axis": 0}, True, False),
({"mapper": None, "axis": 1}, False, True),
({"index": None}, True, False),
({"columns": None}, False, True),
({"index": None, "columns": None}, True, True),
({}, False, False),
],
)
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list("abc"), name="foo")
columns = Index(["col1", "col2"], name="bar")
data = np.arange(6).reshape(3, 2)
df = DataFrame(data, index, columns)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if rename_index else index
expected_columns = columns.rename(None) if rename_columns else columns
expected = DataFrame(data, expected_index, expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
)
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
renamed["foo"] = 1.0
assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
assert "C" in float_frame
assert "foo" not in float_frame
c_id = id(float_frame["C"])
float_frame = float_frame.copy()
float_frame.rename(columns={"C": "foo"}, inplace=True)
assert "C" not in float_frame
assert "foo" in float_frame
assert id(float_frame["foo"]) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
expected = DataFrame(
[[1], [2]],
index=MultiIndex.from_tuples(
[("foo", "bah"), ("bar", "bas")], names=["a", "b"]
),
columns=["2001-01-01"],
)
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(
data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
)
tm.assert_frame_equal(df, expected)
def test_rename_errors_raises(self):
df = DataFrame(columns=["A", "B", "C", "D"])
with pytest.raises(KeyError, match="'E'] not found in axis"):
df.rename(columns={"A": "a", "E": "e"}, errors="raise")
@pytest.mark.parametrize(
"mapper, errors, expected_columns",
[
({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
({"A": "a"}, "raise", ["a", "B", "C", "D"]),
(str.lower, "raise", ["a", "b", "c", "d"]),
],
)
def test_rename_errors(self, mapper, errors, expected_columns):
# GH 13473
# rename now works with errors parameter
df = DataFrame(columns=["A", "B", "C", "D"])
result = df.rename(columns=mapper, errors=errors)
expected = DataFrame(columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
tm.assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(["L0", "L1", "L2"])
tm.assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(
levels=[["bar"], ["bar"], ["bar"]],
codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels(["L0", "L0", "L0"])
tm.assert_frame_equal(result, expected)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({"foo": stacked, "bar": stacked})
names = ["first", "second"]
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, level_codes) in enumerate(
zip(stacked.index.levels, stacked.index.codes)
):
values = lev.take(level_codes)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(
deleveled["first"], deleveled2["level_0"], check_names=False
)
tm.assert_series_equal(
deleveled["second"], deleveled2["level_1"], check_names=False
)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name="index")
tm.assert_series_equal(rdf["index"], exp)
# default name assigned, corner case
df = float_frame.copy()
df["index"] = "foo"
rdf = df.reset_index()
exp = Series(float_frame.index.values, name="level_0")
tm.assert_series_equal(rdf["level_0"], exp)
# but this is ok
float_frame.index.name = "index"
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = "columns"
resetted = float_frame.reset_index()
assert resetted.columns.name == "columns"
# only remove certain columns
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index(["A", "B"])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index("A")
xp = float_frame.reset_index().set_index(["index", "B"])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index("A", drop=True)
xp = float_frame.copy()
del xp["A"]
xp = xp.set_index(["B"], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
index=Index(range(2), name="x"),
)
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
result = df.set_index(["A", "B"]).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C", "D"]])
# With single-level Index (GH 16263)
result = df.set_index("A").reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index("A").reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(["A"]).reset_index(level=levels[0], drop=True)
tm.assert_frame_equal(result, df[["B", "C", "D"]])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ["A", "B"], ["A"]:
with pytest.raises(KeyError, match="Level E "):
df.set_index(idx_lev).reset_index(level=["A", "E"])
with pytest.raises(IndexError, match="Too many levels"):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series(
(9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
)
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted["time"].dtype == np.float64
resetted = df.reset_index()
assert resetted["time"].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ["x", "y", "z"]
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(
vals,
Index(idx, name="a"),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index()
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill="blah")
xp = DataFrame(
full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
df = DataFrame(
vals,
MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index("a")
xp = DataFrame(
full,
Index([0, 1, 2], name="d"),
columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill=None)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill="blah", col_level=1)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = DataFrame(
{"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]})
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{
"A": ["a", "b", "c"],
"B": [np.nan, np.nan, np.nan],
"C": np.random.rand(3),
}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = DataFrame(
[[1, 2], [3, 4]],
columns=date_range("1/1/2013", "1/2/2013"),
index=["A", "B"],
)
result = df.reset_index()
expected = DataFrame(
[["A", 1, 2], ["B", 3, 4]],
columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)],
)
tm.assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = DataFrame(
[[0, 0, 0], [1, 1, 1]],
columns=["index", "A", "B"],
index=RangeIndex(stop=2),
)
tm.assert_frame_equal(result, expected)
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
assert df.set_index(df.index).index.names == ["name"]
mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"])
mi2 = MultiIndex.from_arrays(
df[["A", "B", "A", "B"]].T.values, names=["A", "B", "C", "D"]
)
df = df.set_index(["A", "B"])
assert df.set_index(df.index).index.names == ["A", "B"]
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(["C", "D"])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_rename_objects(self, float_string_frame):
renamed = float_string_frame.rename(columns=str.upper)
assert "FOO" in renamed
assert "foo" not in renamed
def test_rename_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"])
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis="columns")
tm.assert_frame_equal(result, expected)
# Index
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
result = df.rename(str.lower, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename(mapper=str.lower, axis="index")
tm.assert_frame_equal(result, expected)
def test_rename_mapper_multi(self):
df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index(
["A", "B"]
)
result = df.rename(str.upper)
expected = df.rename(index=str.upper)
tm.assert_frame_equal(result, expected)
def test_rename_positional_named(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, columns=str.upper)
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
tm.assert_frame_equal(result, expected)
def test_rename_axis_style_raises(self):
# see gh-12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
# Named target and axis
over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=1)
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(columns=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=0)
# Multiple targets and axis
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, axis="columns")
# Too many targets
over_spec_msg = "Cannot specify all of 'mapper', 'index', 'columns'."
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, str.lower)
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.rename(id, mapper=id)
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame(
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=["a", "b", "c"],
columns=["d", "e", "f"],
)
res1 = df.reindex(["b", "a"])
res2 = df.reindex(index=["b", "a"])
res3 = df.reindex(labels=["b", "a"])
res4 = df.reindex(labels=["b", "a"], axis=0)
res5 = df.reindex(["b", "a"], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=["e", "d"])
res2 = df.reindex(["e", "d"], axis=1)
res3 = df.reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=["b", "a"], columns=["e", "d"])
res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_rename_positional(self):
df = DataFrame(columns=["A", "B"])
with tm.assert_produces_warning(FutureWarning) as rec:
result = df.rename(None, str.lower)
expected = DataFrame(columns=["a", "b"])
tm.assert_frame_equal(result, expected)
assert len(rec) == 1
message = str(rec[0].message)
assert "rename" in message
assert "Use named arguments" in message
def test_assign_columns(self, float_frame):
float_frame["hi"] = "there"
df = float_frame.copy()
df.columns = ["foo", "bar", "baz", "quux", "foo2"]
tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False)
tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", "C2"]]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
def test_ambiguous_warns(self):
df = DataFrame({"A": [1, 2]})
with tm.assert_produces_warning(FutureWarning):
df.rename(id, id)
with tm.assert_produces_warning(FutureWarning):
df.rename({0: 10}, {"A": "B"})
def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {
"self",
"mapper",
"index",
"columns",
"axis",
"inplace",
"copy",
"level",
"errors",
}
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {
"self",
"labels",
"index",
"columns",
"axis",
"limit",
"copy",
"level",
"method",
"fill_value",
"tolerance",
}
def test_droplevel(self):
# GH20342
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
df = df.set_index([0, 1]).rename_axis(["a", "b"])
df.columns = MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
# test that dropping of a level in index works
expected = df.reset_index("a", drop=True)
result = df.droplevel("a", axis="index")
tm.assert_frame_equal(result, expected)
# test that dropping of a level in columns works
expected = df.copy()
expected.columns = Index(["c", "d"], name="level_1")
result = df.droplevel("level_2", axis="columns")
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestIntervalIndex:
def test_setitem(self):
df = DataFrame({"A": range(10)})
s = cut(df.A, 5)
assert isinstance(s.cat.categories, IntervalIndex)
# B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
df["B"] = s
df["C"] = np.array(s)
df["D"] = s.values
df["E"] = np.array(s.values)
assert is_categorical_dtype(df["B"])
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"])
assert is_interval_dtype(df["D"].cat.categories)
assert is_object_dtype(df["C"])
assert is_object_dtype(df["E"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B), check_names=False)
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"], check_names=False)
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"], check_names=False)
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_set_reset_index(self):
df = DataFrame({"A": range(10)})
s = cut(df.A, 5)
df["B"] = s
df = df.set_index("B")
df = df.reset_index()
def test_set_axis_inplace(self):
# GH14636
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2], "C": [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012],
)
expected = {0: df.copy(), 1: df.copy()}
expected[0].index = list("abc")
expected[1].columns = list("abc")
expected["index"] = expected[0]
expected["columns"] = expected[1]
for axis in expected:
result = df.copy()
result.set_axis(list("abc"), axis=axis, inplace=True)
tm.assert_frame_equal(result, expected[axis])
# inplace=False
result = df.set_axis(list("abc"), axis=axis)
tm.assert_frame_equal(expected[axis], result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
result = df.set_axis(list("abc"))
tm.assert_frame_equal(result, expected[0])
# wrong values for the "axis" parameter
for axis in 3, "foo":
with pytest.raises(ValueError, match="No axis named"):
df.set_axis(list("abc"), axis=axis)
def test_set_axis_prior_to_deprecation_signature(self):
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2], "C": [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012],
)
expected = {0: df.copy(), 1: df.copy()}
expected[0].index = list("abc")
expected[1].columns = list("abc")
expected["index"] = expected[0]
expected["columns"] = expected[1]
# old signature
for axis in expected:
with | tm.assert_produces_warning(FutureWarning) | pandas.util.testing.assert_produces_warning |
from sklearn import metrics
import numpy as np
import pandas as pd
import seaborn as sns
from .stats import *
from .scn_train import *
import matplotlib
import matplotlib.pyplot as plt
def divide_sampTab(sampTab, prop, dLevel="cell_ontology_class"):
cts = set(sampTab[dLevel])
trainingids = np.empty(0)
for ct in cts:
aX = sampTab.loc[sampTab[dLevel] == ct, :]
ccount = len(aX.index)
trainingids = np.append(trainingids, np.random.choice(aX.index.values, int(ccount*prop), replace = False))
val_ids = np.setdiff1d(sampTab.index, trainingids, assume_unique = True)
sampTrain = sampTab.loc[trainingids,:]
sampVal = sampTab.loc[val_ids,:]
return([sampTrain, sampVal])
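# A minimal usage sketch of divide_sampTab (illustrative only): the cell-type labels and the
# 50/50 split below are hypothetical, not taken from any real annotation table.
def _demo_divide_sampTab():
    sampTab = pd.DataFrame(
        {"cell_ontology_class": ["T cell"] * 4 + ["B cell"] * 4},
        index=["cell%d" % i for i in range(8)])
    sampTrain, sampVal = divide_sampTab(sampTab, 0.5)
    return sampTrain.shape[0], sampVal.shape[0]  # -> (4, 4): half of each type in each split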
def sc_classAssess(stDat,washedDat, dLevel = "description1", dLevelSID="sample_name", minCells = 40, dThresh = 0, propTrain=0.25, nRand = 50, nTrees=2000):
    # original referenced stTrain before it was defined; assume the filter is meant on stDat[dLevel]
    grp_counts = stDat[dLevel].value_counts()
    goodGrps = grp_counts.index[grp_counts > minCells].values
    stTmp = stDat.loc[np.isin(stDat[dLevel], goodGrps), :]
expDat_good = washedDat["expDat"].loc[stTmp.index, :]
stTrain, stVal = divide_sampTab(stTmp, propTrain, dLevel = dLevel)
expTrain=expDat_good.loc[stTrain.index,:]
expVal=expDat_good.loc[stVal.index,:]
varGenes = findVarGenes(expDat_good, washedDat["geneStats"])
cellgrps=stTrain[dLevel]
testRFs=sc_makeClassifier(expTrain, genes=varGenes, groups=cellgrps, nRand=nRand, ntrees=nTrees)
ct_scores=rf_classPredict(testRFs, expVal)
assessed= [ct_scores, stVal, stTrain]
return assessed
def sc_classThreshold(vect, classification, thresh):
TP=0;
FN=0;
FP=0;
TN=0;
calledPos = vect.loc[vect>thresh].index.values
calledNeg = vect.loc[vect<=thresh].index.values
if (np.isin(classification, calledPos)):
TP = 1
FN = 0
FP = len(calledPos) - 1
TN = len(calledNeg)
else:
TP = 0
FN = 1
FP = len(calledPos)
TN = len(calledNeg) -1
Accu = (TP + TN)/(TP + TN + FP + FN)
return Accu
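# A small sketch of sc_classThreshold on a single cell's class scores; the class names,
# the scores and the 0.5 threshold are made up for illustration.
def _demo_sc_classThreshold():
    vect = pd.Series({"T cell": 0.7, "B cell": 0.2, "NK cell": 0.1})
    return sc_classThreshold(vect, "T cell", 0.5)  # -> 1.0: only the true class exceeds the threshold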
def cn_clPerf(vect, sampTab, dLevel, classification, thresh, dLevelSID="sample_id"):
TP=0;
FN=0;
FP=0;
TN=0;
sampIDs = vect.index.values;
classes = sampTab.loc[sampIDs,dLevel];
actualPos = sampTab.loc[sampTab[dLevel]==classification,dLevelSID]
actualNeg = sampTab.loc[sampTab[dLevel]!=classification,dLevelSID]
calledPos = vect.loc[vect>thresh].index.values
calledNeg = vect.loc[vect<=thresh].index.values
TP = len(np.intersect1d(actualPos, calledPos));
FP = len(np.intersect1d(actualNeg, calledPos));
FN = len(actualPos)-TP;
TN = len(actualNeg)-FP;
return([TP, FN, FP, TN]);
def cn_eval(vect, sampTab, dLevel, classification, threshs=np.arange(0,1,0.05),dLevelSID="sample_id"):
ans=np.zeros([len(threshs), 7])
for i in range(0, len(threshs)):
thresh = threshs[i];
ans[i,0:4] = cn_clPerf(vect, sampTab, dLevel, classification, thresh, dLevelSID=dLevelSID);
ans[:,4] = threshs;
ans=pd.DataFrame(data=ans, columns=["TP", "FN", "FP", "TN", "thresh","FPR", "TPR"]);
TPR=ans['TP']/(ans['TP']+ans['FN']);
FPR=ans['FP']/(ans['TN']+ans['FP']);
ans['TPR']=TPR;
ans['FPR']=FPR;
return ans
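# A minimal sketch of cn_eval on four hypothetical samples scored for class 'beta'; the
# column names ('cell_type', 'sample_id'), sample ids and scores are illustrative only.
def _demo_cn_eval():
    vect = pd.Series([0.9, 0.8, 0.3, 0.1], index=["s1", "s2", "s3", "s4"])
    sampTab = pd.DataFrame(
        {"sample_id": ["s1", "s2", "s3", "s4"],
         "cell_type": ["beta", "beta", "alpha", "alpha"]},
        index=["s1", "s2", "s3", "s4"])
    # returns a frame of TP/FN/FP/TN plus TPR and FPR per threshold (an ROC table)
    return cn_eval(vect, sampTab, "cell_type", "beta", threshs=np.arange(0, 1, 0.25))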
def cn_classAssess(ct_scores, stVal, classLevels="description2", dLevelSID="sample_id", resolution=0.005):
allROCs = {}
evalAll=np.zeros([len(ct_scores.columns),2])
classifications= ct_scores.columns.values;
i=0
for xname in classifications:
classification=classifications[i];
tmpROC= cn_eval(ct_scores[xname],stVal,classLevels,xname,threshs=np.arange(0,1,resolution), dLevelSID=dLevelSID);
allROCs[xname] = tmpROC;
i = i + 1;
return allROCs;
def assess_comm(aTrain, aQuery, resolution = 0.005, nRand = 50, dLevelSID = "sample_name", classTrain = "cell_ontology_class", classQuery = "description2"):
ct_scores = pd.DataFrame(aQuery.X, index = aQuery.obs[dLevelSID], columns = aQuery.var.index)
stQuery= aQuery.obs
stQuery.index = ct_scores.index
stTrain= aTrain.obs
shared_cell_type = np.intersect1d(np.unique(stTrain[classTrain]), np.unique(stQuery[classQuery]))
stVal_com = stQuery.loc[np.isin(stQuery[classQuery], shared_cell_type),:]
if(nRand > 0):
        tmp = np.empty([nRand, len(stVal_com.columns)], dtype=object)
tmp[:]="rand"
tmp=pd.DataFrame(data=tmp, columns=stVal_com.columns.values )
tmp[dLevelSID] = ct_scores.index.values[(len(ct_scores.index) - nRand):len(ct_scores.index)]
tmp.index= tmp[dLevelSID]
stVal_com= pd.concat([stVal_com, tmp])
cells_sub = stVal_com[dLevelSID]
ct_score_com = ct_scores.loc[cells_sub,:]
report= {}
ct_scores_t = ct_score_com.T
true_label = stVal_com[classQuery]
y_true=true_label.str.get_dummies()
eps = 1e-15
y_pred = np.maximum(np.minimum(ct_scores, 1 - eps), eps)
multiLogLoss = (-1 / len(ct_scores_t.index)) * np.sum(np.matmul(y_true.T.values, np.log(y_pred.values)))
pred_label = ct_scores.idxmax(axis=1)
cm= | pd.crosstab(true_label, pred_label) | pandas.crosstab |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
from collections import namedtuple
import math
import geopy.distance
pd.set_option('display.max_rows', 10000)
def generate_dataset_gps():
# tx_coord = (63.4073927,10.4775050) #old
tx_coord = (63.40742, 10.47752) #ole sine koordinater
Measurement = namedtuple("Measurement", ['gps_dist','th_dist', 'angle', 'tx_power', 'rssi'])
measurements = []
def m2d(m):
if m < 9:
return math.floor(math.sqrt(((m - 2) * 50)**2 + 100**2))
else:
return math.floor(math.sqrt(((m - 3) * 50) ** 2 + 100 ** 2))
def id2dir(id):
if id == 10:
return 90
elif id == 11:
return 0
#convert degrees decimal minutes to decimal degrees
def dmm2dd(d, dm):
m = math.floor(dm)
s = (dm - m) * 60
dd = float(d) + float(m)/60 + float(s)/(60*60)
return dd
#convert logged coord to decimal degrees
def convert_coord(n_coord, e_coord):
n_coord = n_coord.replace("N","")
d_n,dm_n = int(n_coord[:2]), float(n_coord[2:])
e_coord = e_coord.replace("E","")
d_e,dm_e = int(e_coord[:3]), float(e_coord[3:])
return (dmm2dd(d_n, dm_n), dmm2dd(d_e, dm_e))
curr_id = 0
measure_num = 0
gps_dist = 0
n_coord = 0
e_coord = 0
with open('data/raw-2/combined.csv') as file:
for line in file.readlines():
line = line.strip()
values = line.split(";")
if values[0] == 'GPS':
n_coord = values[1]
e_coord = values[2]
elif values[0] == 'RADIO':
if curr_id != values[1] or measure_num != values[2]:
curr_id = values[1]
measure_num = values[2]
coords = convert_coord(n_coord, e_coord)
gps_dist = math.floor(math.sqrt((geopy.distance.distance(coords, tx_coord).m)**2 + 100**2))
measurement = Measurement(gps_dist, m2d(int(values[2])), id2dir(int(values[1])), int(values[3]), int(values[4]))
measurements.append(measurement)
df = pd.DataFrame(measurements)
df.to_csv('data/dataset_2_w_gps.csv', index=False)
# generate_dataset_gps()
def generate_cloverleaf_dataset():
# tx_coord = (63.4073927,10.4775050) #old
tx_coord = (63.40742, 10.47752) #ole sine koordinater
Measurement = namedtuple("Measurement", ['gps_dist', 'real_dist', 'th_gps_dist','angle', 'tx_power', 'rssi'])
measurements = []
def m2d(m):
if m < 20:
return math.floor((m - 1) * 50)
else:
return math.floor((m - 2) * 50)
def id2dir(id):
if id == 10:
return 'H_BC' #horizontal polarization, best case
elif id == 11:
return 'V_BC' #vertical polarization, best case
elif id == 12:
return 'H_WC' #horizontal pol, worst case
elif id == 13:
return 'V_WC' #vertical pol, worst case
#convert degrees decimal minutes to decimal degrees
def dmm2dd(d, dm):
m = math.floor(dm)
s = (dm - m) * 60
dd = float(d) + float(m)/60 + float(s)/(60*60)
return dd
#convert logged coord to decimal degrees
def convert_coord(n_coord, e_coord):
n_coord = n_coord.replace("N","")
d_n,dm_n = int(n_coord[:2]), float(n_coord[2:])
e_coord = e_coord.replace("E","")
d_e,dm_e = int(e_coord[:3]), float(e_coord[3:])
return (dmm2dd(d_n, dm_n), dmm2dd(d_e, dm_e))
curr_id = 0
measure_num = 0
gps_dist = 0
n_coord = 0
e_coord = 0
with open('data/combined_raw-kl1-kl2.csv') as file:
for line in file.readlines():
line = line.strip()
values = line.split(";")
if values[0] == 'GPS':
n_coord = values[1]
e_coord = values[2]
elif values[0] == 'RADIO':
if curr_id != values[1] or measure_num != values[2]:
curr_id = values[1]
measure_num = values[2]
coords = convert_coord(n_coord, e_coord)
gps_dist = math.floor(geopy.distance.distance(coords, tx_coord).m)
real_dist = math.floor(math.sqrt((gps_dist**2 + (100-0.7)**2)))
measurement = Measurement(gps_dist, real_dist, m2d(int(values[2])), id2dir(int(values[1])), int(values[3]), int(values[4]))
measurements.append(measurement)
df = | pd.DataFrame(measurements) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../../../input/ronitf_heart-disease-uci/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../../../input/ronitf_heart-disease-uci/"))
# Any results you write to the current directory are saved as output.
# > # If you find this notebook helpful, some upvotes would be very much appreciated - That will keep me motivated 👍
# In[ ]:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from eli5.sklearn import PermutationImportance
import warnings
#perm = PermutationImportance(model, random_state=1).fit(X_test, y_test)
#eli5.show_weights(perm, feature_names = X_test.columns.tolist())
warnings.filterwarnings("ignore")
sns.set_style("darkgrid")
# In[ ]:
df= | pd.read_csv("../../../input/ronitf_heart-disease-uci/heart.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding=utf-8
# vim: set filetype=python:
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import posixpath
import sys
import math
import datetime
import string
from functools import wraps
import traceback
import xlrd3 as xlrd
import openpyxl
import unicodecsv as csv
from math import log10, floor
from pandas.api.types import is_string_dtype
import pandas as pd
import numpy as np
import six
import six.moves
import orjson as json
from plaidcloud.rpc import utc
from plaidcloud.rpc.connection.jsonrpc import SimpleRPC
from plaidcloud.rpc.rpc_connect import Connect
from plaidcloud.utilities.query import Connection, Table
from plaidcloud.utilities import data_helpers as dh
__author__ = '<NAME>'
__maintainer__ = '<NAME> <<EMAIL>>'
__copyright__ = '© Copyright 2013-2021, Tartan Solutions, Inc'
__license__ = 'Apache 2.0'
CSV_TYPE_DELIMITER = '::'
class ContainerLogger(object):
def info(self, msg):
print(msg, file=sys.stderr)
def debug(self, msg):
self.info(msg)
def exception(self, msg=None):
print(traceback.format_exc(), file=sys.stderr)
if msg is not None:
print(msg, file=sys.stderr)
logger = ContainerLogger()
def sql_from_dtype(dtype):
"""Returns a sql datatype given a pandas datatype
Args:
dtype (str): The pandas datatype to convert
Returns:
str: the equivalent SQL datatype
Examples:
>>> sql_from_dtype('bool')
'boolean'
>>> sql_from_dtype('float64')
'numeric'
>>> sql_from_dtype('number')
'numeric'
>>> sql_from_dtype('varchar(123)')
'text'
>>> sql_from_dtype('char(3)')
'text'
>>> sql_from_dtype('xml')
'text'
>>> sql_from_dtype('bytea')
'largebinary'
"""
mapping = {
'bool': 'boolean',
'boolean': 'boolean',
's8': 'text',
's16': 'text',
's32': 'text',
's64': 'text',
's128': 'text',
's256': 'text',
'object': 'text',
's512': 'text',
's1024': 'text',
'text': 'text',
'string': 'text',
'int8': 'smallint', # 2 bytes
'int16': 'integer',
'smallint': 'smallint',
'int32': 'integer', # 4 bytes
'integer': 'integer',
'int64': 'bigint', # 8 bytes
'bigint': 'bigint',
'float8': 'numeric',
'float16': 'numeric', # variable but ensures precision
'float32': 'numeric', # variable but ensures precision
'float64': 'numeric', # variable but ensures precision
'numeric': 'numeric',
'serial': 'serial',
'bigserial': 'bigserial',
'datetime64[s]': 'timestamp', # This may have to cover all datettimes
'datetime64[d]': 'timestamp',
'datetime64[ns]': 'timestamp',
'timestamp': 'timestamp',
'timestamp without time zone': 'timestamp',
'timedelta64[s]': 'interval', # This may have to cover all timedeltas
'timedelta64[d]': 'interval',
'timedelta64[ns]': 'interval',
'interval': 'interval',
'date': 'date',
'time': 'time',
'binary': 'largebinary',
'bytea': 'largebinary',
'largebinary': 'largebinary',
'xml': 'text',
'uuid': 'text',
'money': 'numeric',
'real': 'numeric',
'json': 'text',
'cidr': 'text',
'inet': 'text',
'macaddr': 'text',
}
dtype = str(dtype).lower()
if dtype.startswith('num'):
dtype = 'numeric'
elif 'char' in dtype:
dtype = 'text'
return mapping[dtype]
def save_typed_psv(df, outfile, sep='|', **kwargs):
"""Saves a typed psv, from a pandas dataframe. Types are analyze compatible
sql types, written in the header, like {column_name}::{column_type}, ...
Args:
df (`pandas.DataFrame`): The dataframe to create the psv from
outfile (file object or str): The path to save the output file to
sep (str, optional): The separator to use in the output file
"""
# ADT2017: _write_copy_from did something special with datetimes, but I'm
# not sure it's necessary, so I'm leaving it out.
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
column_names = [cleaned(n) for n in list(df)]
column_types = [sql_from_dtype(d) for d in df.dtypes]
header = [
CSV_TYPE_DELIMITER.join((name, sqltype))
for name, sqltype in six.moves.zip(column_names, column_types)
]
df.to_csv(outfile, header=header, index=False, sep=sep)
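# A minimal sketch of the typed-psv header save_typed_psv writes; the output path and the
# column names below are hypothetical.
def _demo_save_typed_psv():
    df = pd.DataFrame({"region": ["east", "west"], "amount": [1.5, 2.25]})
    save_typed_psv(df, "/tmp/example_typed.psv")
    # first line of the file becomes: region::text|amount::numeric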
def list_of_dicts_to_typed_psv(lod, outfile, types, fieldnames=None, sep='|'):
""" Saves a list of dicts as a typed psv. Needs a dict of sql types. If
provided, fieldnames will specify the column order.
Args:
lod (:type:`list` of :type:`dict`): The list of dicts containing the data
to use to create the psv
outfile (str): The path to save the output file to, including file name
types (dict): a dict with column names as the keys and column datatypes as
the values
fieldnames (:type:`list` of :type:`str`, optional): A list of the field names.
If none is provided, defaults to the keys in `types`
sep (str): The separator to use in the output file
"""
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
header = {
name: CSV_TYPE_DELIMITER.join((cleaned(name), sqltype))
for name, sqltype in types.items()
}
if fieldnames is None:
# Caller doesn't care about the order
fieldnames = list(types.keys())
if isinstance(outfile, six.string_types):
buf = open(outfile, 'wb')
else:
buf = outfile
try:
writer = csv.DictWriter(buf, fieldnames=fieldnames, delimiter=sep)
writer.writerow(header) # It's not just the keys, so we're not using writeheader
for row in lod:
writer.writerow(row)
finally:
if isinstance(outfile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
def get_project_variables(token, uri, project_id):
"""It opens a connection to Analyze and then
gets vars for a given project
Args:
token (str): oAuth token to pass into
uri (str): Typically https://ci.plaidcloud.com/json-rpc/, https://plaidcloud.com/json-rpc/
or local dev machine equiv. Would typically originate from a local config.
project_id (str): Id of the Project for which to grab the variables
Returns:
dict: Variables as key/values
"""
rpc = SimpleRPC(token, uri, verify_ssl=True)
try:
project_vars = rpc.analyze.project.variables(project_id=project_id)
except:
project_vars = rpc.analyze.project.variables(project=project_id)
return {pv['id']: pv['value'] for pv in project_vars}
def download(tables, configuration=None, retries=5, conn=None, clean=False, **kwargs):
"""This replaces the old get_tables() that was client-specific.
It opens a connection to Analyze and then
accepts a set of tables and saves them off to a local location.
For now, tables are understood to be typed psv's, but that can expand to
suit the need of the application (for instance, Excel.)
Args:
tables (set or list): table paths to retrieve (for backwards compatibility, you can leave off the initial '/')
token (str): token to pass into
uri (str): Typically https://ci.plaidcloud.com/json-rpc/, https://plaidcloud.com/json-rpc/
or local dev machine equiv. Would typically originate from a local config.
local_storage_path (str): local path where files should be saved. Would typically originate
from a local config.
**kwargs:
config (dict) contains a dict of config settings
token (str) simpleRFC authorization token
uri (str): uri e.g. 'https://ci.plaidcloud.com/json-rpc/'
local_storage_path (str) Target for files being saved
Returns:
The return value of function. If retries are exhausted, raises the
final Exception.
Examples:
"""
# TODO: if configuration is None, revert to **kwargs for the params we need.
if not conn:
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
try:
return_df = configuration['return_df']
except:
return_df = True
try:
project_id = configuration['project_id']
except:
project_id = conn.project_id
dfs = []
for table in tables:
table_path = table.get('table_name')
query = table.get('query')
table_obj = table.get('table_object')
df = None # Initial value
# wipe this out each time through
clean_df = pd.DataFrame()
logger.debug("Attempting to download {0}...".format(table_path))
tries = 1
if table_obj is not None:
# RPC table object exists; proceed to use it to fetch data
while tries <= retries:
if query is None:
# no query passed. fetch whole table
df = conn.get_dataframe(table_obj, clean=clean)
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
elif isinstance(query, six.string_types):
# query object passed in. execute it
try:
df = conn.get_dataframe_by_querystring(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
else:
# query object passed in. execute it
try:
df = conn.get_dataframe_by_query(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
tries += 1
columns = table_obj.cols()
if columns:
if isinstance(df, pd.core.frame.DataFrame):
cols = [c['id'] for c in columns if c['id'] in df.columns.tolist()]
df = df[cols] # this ensures that the column order is as expected
else:
cols = [c['id'] for c in columns]
df = pd.DataFrame(columns=cols) # create empty dataframe with expected metadata/shape
else:
if not table_path.startswith('/'):
table_path = '/{}'.format(table_path)
table_result = None
while not table_result and tries <= retries:
tries += 1
try:
table_result = conn.analyze.table.table(project_id=project_id, table_path=table_path)
logger.debug("Downloaded {0}...".format(table_path))
break
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
df = table_result_to_df(table_result or pd.DataFrame())
if not isinstance(df, pd.core.frame.DataFrame):
logger.exception('Table {0} failed to download!'.format(table_path))
elif len(df.columns) == 0:
logger.exception('Table {0} downloaded 0 records!'.format(table_path))
else:
if clean and query:
# Use the old cleaning process for things other than the full query.
clean_df = dh.clean_frame(df)
else:
clean_df = df
dfs.append({'df': clean_df, 'name': table_path})
return dfs
def load(source_tables, fetch=True, cache_locally=False, configuration=None, conn=None, clean=False):
"""Load frame(s) from requested source, returning a list of dicts
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
"""
return_type = None
if type(source_tables) == list:
return_type = 'list'
elif type(source_tables) == str:
# single table (as string) passed... expecting to return full table
source_tables = [source_tables]
return_type = 'dataframe'
elif type(source_tables) == dict:
# single table (as dict) passed... likely with subsetting query, but not req'd
source_tables = [source_tables]
return_type = 'dataframe'
source_tables_proper = []
reassign = False
for s in source_tables:
if type(s) == str:
# convert list of strings into a list of dicts
reassign = True
d = {}
d['table_name'] = s
source_tables_proper.append(d)
if reassign:
# replace source_tables with reformatted version
source_tables = source_tables_proper
dfs = []
if fetch is True:
if not conn:
# create connection object
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
for s in source_tables:
# create table objects if they don't exist
if s.get('table_object') == None:
s['table_object'] = Table(conn, s.get('table_name'))
downloads = download(source_tables, configuration=configuration, conn=conn, clean=clean)
for d in downloads:
df = d.get('df')
name_of_df = '{0}.psv'.format(d.get('name'))
if name_of_df.startswith('/'):
name_of_df = name_of_df[1:]
if cache_locally is True:
with open(os.path.join(configuration['LOCAL_STORAGE'], name_of_df), 'w') as f:
save_typed_psv(df, f)
dfs.append(df)
else:
for s in source_tables:
source_table = '{0}.psv'.format(s.get('table_name'))
source_path = os.path.join(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
if return_type == 'dataframe':
return dfs[0]
else:
return dfs
def load_new(source_tables, sep='|', fetch=True, cache_locally=False, configuration=None, connection=None):
"""Load frame(s) from requested source
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
TODO: Make it fetch from analyze table....really this should be assimilated with dwim once dwim works again.
TODO: Make it go to analyze and cache locally, if requested to do so.
"""
if connection:
configuration['project_id'] = connection.project_id
if fetch is True:
download(source_tables, configuration)
dfs = []
for source_table in source_tables:
_, table_name = posixpath.split(source_table)
source_path = '{}/{}.psv'.format(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
return dfs
def dtype_from_sql(sql):
"""Gets a pandas dtype from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas dtype equivalent of `sql`
"""
mapping = {
'boolean': 'bool',
'text': 'object',
'smallint': 'int16',
'integer': 'int32',
'bigint': 'int64',
'numeric': 'float64',
'timestamp': 'datetime64[s]',
'interval': 'timedelta64[s]',
'date': 'datetime64[s]',
'time': 'datetime64[s]',
}
return mapping.get(str(sql).lower(), None)
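# Quick illustrative checks pairing dtype_from_sql with sql_from_dtype defined above.
def _demo_dtype_from_sql():
    assert dtype_from_sql("numeric") == "float64"
    assert dtype_from_sql("text") == "object"
    assert sql_from_dtype(dtype_from_sql("bigint")) == "bigint"  # round-trips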
def sturdy_cast_as_float(input_val):
"""
Force a value to be of type 'float'. Sturdy and unbreakeable.
Works like data_helpers.cast_as_float except it returns NaN and None
in cases where such seems appropriate, whereas the former forces to 0.0.
"""
if input_val is None:
return 0.0
try:
        if np.isnan(input_val):
            return float('nan')
else:
try:
return float(input_val)
except ValueError:
return None
except:
try:
return float(input_val)
except ValueError:
return None
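# Illustrative behaviour of sturdy_cast_as_float on a few edge cases (inputs are made up).
def _demo_sturdy_cast_as_float():
    assert sturdy_cast_as_float("1.25") == 1.25
    assert sturdy_cast_as_float(None) == 0.0       # None is forced to 0.0
    assert sturdy_cast_as_float("not a number") is None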
def converter_from_sql(sql):
"""Gets a pandas converter from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas converter
"""
mapping = {
'boolean': bool,
'text': str,
'smallint': int,
'integer': int,
'bigint': int,
#'numeric': float, #dh.cast_as_float,
#'numeric': dh.cast_as_float,
'numeric': sturdy_cast_as_float,
'timestamp': pd.datetime,
'interval': pd.datetime,
'date': pd.datetime,
'time': pd.datetime,
}
return mapping.get(str(sql).lower(), str(sql).lower())
def load_typed_psv(infile, sep='|', **kwargs):
""" Loads a typed psv into a pandas dataframe. If the psv isn't typed,
loads it anyway.
Args:
infile (str): The path to the input file
sep (str, optional): The separator used in the input file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
if isinstance(infile, six.string_types):
if os.path.exists(infile):
buf = open(infile, 'rb')
else:
logger.exception('File does not exist: {0}'.format(infile))
return False
else:
buf = infile
try:
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
names_and_types = [h.split(CSV_TYPE_DELIMITER) for h in header]
column_names = [n[0] for n in names_and_types]
try:
dtypes = {
name: dtype_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
dtypes = None
converters={}
#for name, sqltype in names_and_types:
#converter = converter_from_sql(sqltype)
#if converter:
#converters[name] = converter
try:
converters = {
name: converter_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
converters = None
# This will start on the second line, since we already read the first line.
#return pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep)
na_values = [
#'', # This was here, and then commented out, and I'm putting it back in 20180824. ***
# # If it isn't here, we fail when attempting to import a delimited file of type 'numeric'
# # it is coming in as null/empty (e.g. the last record in the following set:)
# # LE::text|PERIOD::text|RC::text|MGCOA::text|VT::text|TP::text|FRB::text|FUNCTION::text|DCOV::numeric|LOCAL_CURRENCY::text|CURRENCY_RATE::numeric|DCOV_LC::numeric
# # LE_0585|2018_01|6019999|6120_NA|VT_0585|TP_NA|FRB_AP74358|OM|0.00031|EUR|0.8198|0.000254138
# # LE_0003|2018_07|CA10991|5380_EBITX|VT_9988|TP_NA|FRB_APKRA15|OM|-0.00115|INR|68.7297|-0.079039155
# # LE_2380|2017_08|AP92099|Q_5010_EBITX|VT_0585|TP_NA|FRB_AP92099|RE|99|||
'#N/A',
'#N/A N/A',
'#NA',
'-1.#IND',
'-1.#QNAN',
'-NaN',
'-nan',
'1.#IND',
'1.#QNAN',
'N/A',
'NA',
'NULL',
'NaN',
'n/a',
'nan',
'null'
]
parse_dates = []
if dtypes is not None:
for k, v in six.iteritems(dtypes):
dtypes[k] = v.lower()
#Handle inbound dates
#https://stackoverflow.com/questions/21269399/datetime-dtypes-in-pandas-read-csv
if 'datetime' in dtypes[k]:
dtypes[k] = 'object'
parse_dates.append(k)
try:
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, encoding='utf-8')
except ValueError:
#remove dtypes if we have converters instead:
for k in six.iterkeys(converters):
if k in list(dtypes.keys()):
dtypes.pop(k, None)
na_values.append('')
buf = open(infile, 'rb')
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, converters=converters, encoding='utf-8')
finally:
# A final note:
# SURELY there's a more efficient and native pandas way of doing this, but I'll be damnded if I could figure it out.
# Pandas used to have an error='coerce' method to force data type. It's no longer an option, it seems.
# Forcing data type is NOT easy, when incoming text data is sequential delimiters with no values or whitespace.
# What We're doing now is still not w/o risk. There are use cases for setting empty to zero, which is what we're doing, and use cases to set
# empty to null, which is probably what we SHOULD do, but for now, we do it this way because we already have a battle hardened dh.cast_as_float that
# works this way. We should probably just call a different homegrown float that returns a NaN or None (None being preferred) rather than 0.0 on exception.
# Mercy. This has been a pain.
# I guess if it was easy, Pandas wouldn't support the ability to send in your own converters.
pass
return df
finally:
if isinstance(infile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
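# A round-trip sketch with save_typed_psv above; the path is hypothetical and the dtypes
# come back from the ::sql types in the header.
def _demo_load_typed_psv():
    df = pd.DataFrame({"region": ["east", "west"], "amount": [1.5, 2.25]})
    save_typed_psv(df, "/tmp/example_typed.psv")
    loaded = load_typed_psv("/tmp/example_typed.psv")
    return loaded.dtypes  # region -> object, amount -> float64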
def table_result_to_df(result):
"""Converts a SQL result to a pandas dataframe
Args:
result (dict): The result of a database query
Returns:
`pandas.DataFrame`: A dataframe representation of `result`
"""
meta = result['meta']
data = result['data']
columns = [m['id'] for m in meta]
dtypes = {
m['id']: dtype_from_sql(m['dtype'].lower())
for m in meta
}
df = pd.DataFrame.from_records(data, columns=columns)
try:
typed_df = df.astype(dtype=dtypes)
except:
"""
This is heavy-handed, but it had to be.
Something was tripping up the standard behavior, presumably relating to
handling of nulls in floats. We're forcing them to 0.0 for now, which is possibly
sketchy, depending on the use case, but usually preferred behavior.
Buyer beware.
"""
typed_df = df
for col in typed_df.columns:
if dtypes[col] == u'object':
typed_df[col] = list(map(dh.cast_as_str, typed_df[col]))
elif dtypes[col].startswith(u'float'):
typed_df[col] = list(map(dh.cast_as_float, typed_df[col]))
elif dtypes[col].startswith(u'int'): #detect any flavor of int and cast it as int.
typed_df[col] = list(map(dh.cast_as_int, typed_df[col]))
return typed_df
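# A minimal sketch of the result shape table_result_to_df expects; the column ids, dtypes
# and rows below are made up.
def _demo_table_result_to_df():
    result = {
        "meta": [{"id": "region", "dtype": "text"}, {"id": "amount", "dtype": "numeric"}],
        "data": [("east", 1.5), ("west", 2.25)],
    }
    return table_result_to_df(result)  # DataFrame with region as object, amount as float64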
def dwim_save(df, name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, saves a dataframe as an analyze table.
Otherwise saves it as a typed psv in localdir.
Args:
df (`pandas.DataFrame`): The dataframe to save
name (str): The name to save this dataframe as
localdir (str, optional): The local path to save the typed psv
lvl (str, optional): What level (project/model) the table should be
extension (str, optional): What file extension to give the output file
sep (str, optional): The separator to use in the output file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import save, save_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
save_fn = {
'model': save,
'project': save_project,
}[lvl]
save_fn(df, name)
except ImportError:
# We must not be on an app server, so save as typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
save_typed_psv(df, path, sep)
def dwim_load(name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, loads an analyze table.
Otherwise loads a typed psv from localdir.
Args:
name (str): The name of the table or file to load
localdir (str, optional): The path to the directory where the local file is stored
lvl (str, optional): The level (model/project) of the table to load
extension (str, optional): The flie extension of the local file
sep (str, optional): The separator used in the local file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import load, load_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
load_fn = {
'model': load,
'project': load_project,
}[lvl]
return load_fn(name)
except ImportError:
# We must not be on an app server, so load from typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
return load_typed_psv(path, sep)
def clean_uuid(id):
"""Removes any invalid characters from a UUID and ensures it is 32 or 36 characters
Args:
id (str): The ID to clean
Returns:
str: `id` with any invalid characters removed
"""
# !! WARNING: If you're calling this in new code, make sure it's really what you
# !! want. It used to remove dashes. That turned out to be a bad idea. Now
# !! it leaves dashes in.
#
# !! If you've found a bug related to dashes being left in, and this is
# !! being called on lookup, you should probably just remove the call to
# !! clean_uuid. Going forward, we don't remove dashes.
if id is None:
return None
name = six.text_type(id).lower()
valid_chars = '0123456789abcdef-'
cleaned_id = u''.join(n for n in name if n in valid_chars)
if '-' in cleaned_id:
if len(cleaned_id) != 36:
raise Exception("Could not clean id {}. Not 36 characters long.".format(id))
else:
if len(cleaned_id) != 32:
raise Exception("Could not clean id {}. Not 32 characters long.".format(id))
return cleaned_id
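# Illustrative behaviour of clean_uuid: case is folded, dashes are kept, length is checked.
def _demo_clean_uuid():
    assert clean_uuid("123E4567-E89B-12D3-A456-426614174000") == "123e4567-e89b-12d3-a456-426614174000"
    assert clean_uuid(None) is None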
def clean_name(name):
"""
DEPRECATED: does nothing
Removes any invalid characters from a name and limits it to 63 characters
Args:
name (str): The name to clean
Returns:
str: The cleaned version of `name`
"""
return name
def clean_filename(name):
"""Remove '/' from a name
Args:
name (str): the filename to clean
Returns:
str: the cleaned version of `name`
"""
if name is None:
return None
# everything's fine except /
    return six.text_type(name).translate({ord('/'): None})
def describe(df):
"""Shorthand for df.describe()
Args:
df (`pandas.DataFrame`): The dataframe to describe
Returns:
summary: Series/DataFrame of summary statistics
"""
return df.describe()
def unique_values(df, column):
"""Returns unique values in the provided column
Args:
df (`pandas.DataFrame`): The DataFrame containing data
column (str): The column to find unique values in
Returns:
list: The unique values in the column
"""
return df[column].unique()
def count_unique(group_by, count_column, df):
"""Returns a count of unique items in a dataframe
Args:
group_by (str): The group by statement to apply to the dataframe
count_column (str): The column to count unique records in
df (`pandas.DataFrame`): The DataFrame containing the data
Returns:
int: The count of unique items in the specified column after grouping
"""
return df.groupby(group_by)[count_column].apply(lambda x: len(x.unique()))
def sum(group_by, df):
return df.groupby(group_by).sum()
def std(group_by, df):
return df.groupby(group_by).std()
def mean(group_by, df):
return df.groupby(group_by).mean()
def count(group_by, df):
return df.groupby(group_by).count()
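# A small sketch of the groupby helpers above; note that sum() here shadows the Python
# builtin, so these calls refer to the module-level helpers. The frame is hypothetical.
def _demo_groupby_helpers():
    df = pd.DataFrame({"region": ["east", "east", "west"], "amount": [1.0, 2.0, 3.0]})
    totals = sum("region", df)     # amount: east 3.0, west 3.0
    averages = mean("region", df)  # amount: east 1.5, west 3.0
    return totals, averages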
def inner_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps only matches
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='inner')
def outer_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps data from both frames and matches up using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='outer')
def left_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from left frame and any matches in right using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='left')
def right_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from right frame and any matches in left using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='right')
def anti_join(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from left frame that is not found in right frame
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
indicator_status = False
indicator_name = '_merge'
left_cols = left_frame.columns
# avoid collision with pd generated indicator name
while not indicator_status:
if indicator_name in left_cols:
indicator_name = '_' + indicator_name
else:
indicator_status = True
df = pd.merge(left_frame, right_frame[right_on], how='left', left_on=left_on, right_on=right_on, indicator=indicator_name)
df = df[df[indicator_name] == 'left_only']
del df[indicator_name]
return df
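# A minimal sketch of anti_join: only rows in the left frame with no key match in the
# right frame survive. The frames and the key column are hypothetical.
def _demo_anti_join():
    left = pd.DataFrame({"key": ["a", "b", "c"], "val": [1, 2, 3]})
    right = pd.DataFrame({"key": ["b", "c"], "other": [10, 20]})
    return anti_join(left, right, "key")  # keeps only the 'a' row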
def compare(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from right frame and any matches in left using the on_columns"""
#20180420 PBB Is "compare" a good name for this, it's basically a right-join in SQL terms?
#20180420 MWR It's quite old legacy. Not sure this one has ever been used for anything. Perhaps
# we can just do away with it.
if right_on is None:
right_on = left_on
return pd.merge(left_frame, right_frame, left_on=left_on, right_on=right_on, how='outer')
def apply_rule(df, rules, target_columns=None, include_once=True, show_rules=False):
"""
If include_once is True, then condition n+1 only applied to records left after condition n.
Adding target column(s), plural, because we'd want to only run this operation once, even
if we needed to set multiple columns.
Args:
df (pandas.DataFrame): The DataFrame to apply rules on
rules (list): A list of rules to apply
target_columns (list of str, optional): The target columns to apply rules on.
include_once (bool, optional): Should records that match multiple rules
be included ONLY once? Defaults to `True`
show_rules (bool, optional): Display the rules in the result data? Defaults to `False`
Returns:
pandas.DataFrame: The results of applying rules to the input `df`
"""
target_columns = target_columns or ['value']
df_final = pd.DataFrame()
df['temp_index'] = df.index
df['include'] = True
df['log'] = ''
if show_rules is True:
df['rule_number'] = ''
df['rule'] = ''
# Establish new column(s) as blank columns.
for column in target_columns:
df[column] = ''
def exclude_matched(include, match):
"""
Exclude if matched, or if previously excluded
Please do not change the 'if match is True:' line to 'if match:'. It matters here.
"""
return False if match is True else include
rule_num = 0
for rule in rules:
rule_num = rule_num + 1
rule_condition = rule.get('condition')
# Find subset based on condition
if rule_condition is not None and rule_condition != '' and str(rule_condition) != 'nan':
try:
df_subset = df[df['include'] == True].query(rule_condition, engine='python')
print('subset length: {}'.format(len(df[df['include'] == True])))
if show_rules:
df_subset['rule_number'] = str(rule_num)
df_subset['rule'] = str(rule_condition)
except Exception as e:
# TODO update this. We should capture all exceptions in an exception table.
df_subset = | pd.DataFrame() | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
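# Illustrative sketch (separate from the test module above): union_categoricals
# concatenates Categoricals while merging their category sets.  Recent pandas
# versions also expose it via pandas.api.types, which is assumed here.
def _demo_union_categoricals():
    import pandas as pd
    from pandas.api.types import union_categoricals
    a = pd.Categorical(list('abc'))
    b = pd.Categorical(list('abd'))
    # The result holds the values a b c a b d with categories ['a', 'b', 'c', 'd'].
    return union_categoricals([a, b])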
import FinsterTab.W2020.DataForecast
import datetime as dt
from FinsterTab.W2020.dbEngine import DBEngine
import pandas as pd
import sqlalchemy as sal
import numpy
from datetime import datetime, timedelta, date
import pandas_datareader.data as dr
def get_past_data(self):
"""
Get raw data from Yahoo! Finance for SPY during Great Recession
Store data in MySQL database
:param sources: provides ticker symbols of instruments being tracked
"""
# Assume that date is 2010
now = dt.date(2009, 1, 1) # Date Variables
start = now - timedelta(days=1500)  # roughly four years of history before 'now'
end = now
# data will be a 2D Pandas Dataframe
data = dr.DataReader('SPY', 'yahoo', start, end)
symbol = [3] * len(data) # add column to identify instrument id number
data['instrumentid'] = symbol
data = data.reset_index() # no designated index - easier to work with mysql database
# Rename the Yahoo! Finance columns to the names used in the MySQL database.
# The names are kept essentially the same to avoid ambiguity; MySQL column
# names are not case-sensitive.
data.rename(columns={'Date': 'date', 'High': 'high', 'Low': 'low', 'Open': 'open', 'Close': 'close',
'Adj Close': 'adj close', 'Volume': 'volume'}, inplace=True)
data = data.sort_values(by=['date'])  # make sure data is ordered by trade date (sort_values is not in-place)
# send data to database
# replace data each time program is run
data.to_sql('dbo_paststatistics', self.engine, if_exists=('replace'),
index=False,
dtype={'date': sal.Date, 'open': sal.FLOAT, 'high': sal.FLOAT, 'low': sal.FLOAT,
'close': sal.FLOAT, 'adj close': sal.FLOAT, 'volume': sal.FLOAT})
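# Illustrative sketch (not part of the original class): the same
# rename -> sort -> to_sql pipeline as get_past_data, but written against an
# in-memory SQLite engine so it has no external dependencies.  The table and
# column names here are assumptions for the demo only.
def _demo_store_prices():
    import pandas as pd
    import sqlalchemy as sal
    engine = sal.create_engine('sqlite://')
    data = pd.DataFrame({'Date': pd.to_datetime(['2009-01-02', '2009-01-01']),
                         'Close': [90.2, 91.0]})
    data.rename(columns={'Date': 'date', 'Close': 'close'}, inplace=True)
    data = data.sort_values(by=['date'])   # sort_values returns a new frame
    data.to_sql('demo_prices', engine, if_exists='replace', index=False)
    return pd.read_sql_query('SELECT * FROM demo_prices', engine)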
# Tests the accuracy of the old functions
def accuracy(self):
query = 'SELECT * FROM dbo_algorithmmaster'
algorithm_df = pd.read_sql_query(query, self.engine)
query = 'SELECT * FROM dbo_instrumentmaster'
instrument_master_df = pd.read_sql_query(query, self.engine)
# Changes algorithm code
for code in range(len(algorithm_df)):
# Dynamic range for changing instrument ID starting at 1
for ID in range(1, len(instrument_master_df) + 1):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = %d AND b.instrumentid = %d AND a.algorithmcode = "%s"' % (
ID, ID, algorithm_df['algorithmcode'][code])
df = pd.read_sql_query(query, self.engine)
count = 0
# Calculates accuracy
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][
x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] <
df['forecastcloseprice'][
x]):
count += 1
# Populates absolute_percent_error with the calculated percent error for a specific data point
absolute_percent_error = []
for i in range(len(df)):
absolute_percent_error.append(
abs((df['close'].loc[i] - df['forecastcloseprice'].loc[i]) / df['close'].loc[i]))
# Calculate sum of percent error and find average
average_percent_error = 0
for i in absolute_percent_error:
average_percent_error = average_percent_error + i
average_percent_error = average_percent_error / len(df)
# return the average percent error calculated above
print("Average percent error for instrument: %d and algorithm: %s " % (ID, algorithm_df['algorithmcode'][code]), average_percent_error)
#print('Algorithm:', algorithm_df['algorithmcode'][code])
#print('instrumentid: %d' % ID, instrument_master_df['instrumentname'][ID - 1])
#print('length of data is:', len(df))
#print('number correct: ', count)
d = len(df)
b = (count / d) * 100
#print('The accuracy is: %.2f%%\n' % b)
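# Illustrative sketch (not part of the original class): the trend-accuracy and
# average-percent-error loops in accuracy() can be written with vectorised
# pandas operations.  The two series are made-up stand-ins for 'close' and
# 'forecastcloseprice'.
def _demo_error_metrics():
    import numpy as np
    import pandas as pd
    close = pd.Series([10.0, 11.0, 10.5, 12.0])
    forecast = pd.Series([10.2, 10.8, 10.9, 11.5])
    same_direction = np.sign(close.diff()) == np.sign(forecast.diff())
    trend_accuracy = same_direction.iloc[1:].mean() * 100    # % of moves in the same direction
    mape = (close - forecast).abs().div(close).mean() * 100  # mean absolute percent error
    return trend_accuracy, mape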
# Isolated tests for ARIMA as we were trying to determine why it was so accurate
def arima_accuracy(self):
query = 'SELECT * FROM dbo_algorithmforecast AS a, dbo_instrumentstatistics AS b WHERE a.forecastdate = b.date AND' \
' a.instrumentid = 1 AND b.instrumentid = 1 AND a.algorithmcode = "ARIMA"'
df = pd.read_sql_query(query, self.engine)
df = df.tail(10)
df = df.reset_index(drop=True)
#print(df)
arima_count = 0
for x in range((len(df) - 1)):
# Check if upward or downward trend
if df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][x] \
or (df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] < df['forecastcloseprice'][x]):
arima_count += 1
#print(df['close'], df['forecastcloseprice'])
#print(arima_count)
#print(arima_count/len(df))
# Accuracy test for the new function MSF1
def MSF1_accuracy(self):
# Queries the database to grab all of the Macro Economic Variable codes
query = "SELECT macroeconcode FROM dbo_macroeconmaster WHERE activecode = 'A'"
id = pd.read_sql_query(query, self.engine)
id = id.reset_index(drop=True)
# Queries the database to grab all of the instrument IDs
query = 'SELECT instrumentid FROM dbo_instrumentmaster'
id2 = pd.read_sql_query(query, self.engine)
id2 = id2.reset_index(drop=True)
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
# Bool to determine whether we append to dbo_tempvisualize or replace the values
to_append = False
# Create a for loop to iterate through all of the instrument ids
for v in id2['instrumentid']:
# Initializes a list for which we will eventually be storing all data to add to the macroeconalgorithm database table
data = []
# Data1 will be used to store the forecastdate, instrumentid, forecastprice, and algorithm code
# It will be used to graph our backtested forecast against the actual instrument prices
data1 = []
# Getting Dates for Future Forecast as well as actual close prices for instrumentID#
# We chose 2018 - 2020, to alter this date range simply change the dates in the 3rd line of the query for the dates you want to test on
# Make sure they are valid dates as some instruments only have statistics that go back so far, check the instrument statistic table to figure out how far back each instrument goes
query = "SELECT date, close FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, start_date, end_date)
# instrument_stats will hold the closing prices and the dates for the dates we are forecasting for
instrument_stats = pd.read_sql_query(query, self.engine)
# We isolate the dates and closing prices into individual arrays to make them easier to work with
date = []
close = []
for i in instrument_stats['date']:
date.append(i)
for i in instrument_stats['close']:
close.append(i)
# n will always correspond to the number of dates, which is also the number of data points being compared
n = len(date)
# Median_forecast will be a dictionary where the key is the date and the value is a list of forecasted prices
median_forecast = {}
# This dictionary will be used to easily combine all of the forecasts for different dates to determine the median forecast value
for i in date:
temp = {i: []}
median_forecast.update(temp)
# This query will grab quarterly instrument prices from between 2014 and the current date to be used in the forecasting
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(v, train_date, start_date)
# Executes the query and stores the result in a dataframe variable
df2 = pd.read_sql_query(query, self.engine)
# This for loop iterates through the different macro economic codes to calculate the percent change for each macroeconomic variable
for x in id['macroeconcode']:
# Retrieves the most recent macro economic statistics prior to the date for which we are testing our algorithm
query = "SELECT * FROM dbo_macroeconstatistics WHERE macroeconcode = {} and date <= {} ".format('"' + str(x) + '"', start_date)
df = pd.read_sql_query(query, self.engine)
macro = df.tail(n)
SP = df2.tail(n)
temp = df.tail(n + 1)
temp = temp.reset_index()
# Converts macro variables to percent change
macroPercentChange = macro
macro = macro.reset_index(drop=True)
SP = SP.reset_index(drop=True)
macroPercentChange = macroPercentChange.reset_index(drop=True)
for i in range(0, n):
if (i == 0):
macrov = (macro['statistics'][i] - temp['statistics'][i]) / temp['statistics'][i]
macroPercentChange['statistics'].iloc[i] = macrov * 100
else:
macrov = (macro['statistics'][i] - macro['statistics'][i - 1]) / macro['statistics'][i - 1]
macroPercentChange['statistics'].iloc[i] = macrov * 100
# Algorithm for forecast price
S = calc(self, macroPercentChange, SP,n) # Calculates the average GDP and S&P values for the given data points over n days and performs operations on GDP average
# isFirst will determine whether or not this is the first calculation being done
# If it is true then we use the most recent instrument statistic to forecast the first pricepoint
# IF it is false then we use the previous forecast price to predict the next forecast price
isFirst = True
# temp_price will be used to hold the previous forecast price for the next prediction
temp_price = 0
# Setup a for loop to calculate the final forecast price and add data to the list variable data
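# Note: the two 'x in [2, 3, 4]' branches below compute identical formulas (and x is a
# macroeconcode string), so the branch taken does not change the result; the structure is
# kept as in the original logic.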
for i in range(n):
if isFirst:
if x in [2, 3, 4]:
temp_price = ((S * (SP['close'].iloc[n-1])) + (SP['close'].iloc[n-1]))
isFirst = False
else:
temp_price = ((S * SP['close'].iloc[n-1]) + SP['close'].iloc[n-1])
isFirst = False
else:
if x in [2, 3, 4]:
temp_price = ((S * temp_price) + temp_price)
else:
temp_price = ((S * temp_price) + temp_price)
# Once the forecast price is calculated append it to median_forecast list
median_forecast[date[i]].append(temp_price)
# Calculates the median value for each date using a list of prices forecasted by each individual macro economic variable
forecast_prices = []
for i in date:
# Sort the forecasted prices based on date
sorted_prices = sorted(median_forecast[i])
# calculate the median forecasted price for each date
# (odd count -> middle element, even count -> mean of the two middle elements)
if len(sorted_prices) % 2 != 0:
center = int(len(sorted_prices) / 2)
forecast_prices.append(sorted_prices[center])
else:
center = int(len(sorted_prices) / 2)
forecast_prices.append((sorted_prices[center] + sorted_prices[center - 1]) / 2)
# Set up a for loop to construct a list using variables associated with macroeconalgorithm database table
for i in range(len(forecast_prices)):
data.append([date[i], v, 'ALL', forecast_prices[i], close[i], 'MSF1', 0])
data1.append([date[i], v, forecast_prices[i], 'MSF1'])
# Convert data list to dataframe variable
df = pd.DataFrame(data, columns=['forecastdate', 'instrumentid', 'macroeconcode',
'forecastcloseprice', 'close', 'algorithmcode', 'prederror'])
df1 = pd.DataFrame(data1, columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'algorithmcode'])
df1.to_sql('dbo_tempvisualize', self.engine, if_exists=('replace' if not to_append else 'append'), index=False)
to_append = True
# Populates absolute_percent_error with the calculated percent error for a specific data point
absolute_percent_error = []
for i in range(n):
absolute_percent_error.append(abs((df['close'].loc[i] - df['forecastcloseprice'].loc[i]) / df['close'].loc[i]))
# Calculate sum of percent error and find average
average_percent_error = 0
for i in absolute_percent_error:
average_percent_error = average_percent_error + i
average_percent_error = average_percent_error / n
count = 0
# Calculates trend accuracy
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][
x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] <
df['forecastcloseprice'][
x]):
count += 1
length = len(df)
trend_error = (count / length) * 100
print("Trend accuracy for %s for instrument %d is %.2f%%" % ('MSF1', v, trend_error))
print("The average percent error for %s for instrument %d is %.2f%%" % ('MSF1', v, average_percent_error * 100))
# return the average percent error calculated above
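# Illustrative sketch (not part of the original class): the per-date median of
# the individual variable forecasts can also be taken with the standard
# library, which handles the odd/even length cases automatically.  The prices
# below are invented.
def _demo_median_forecast():
    import statistics
    prices_for_one_date = [101.5, 99.0, 103.2, 100.4]
    return statistics.median(prices_for_one_date)   # mean of the two middle values here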
# This function is not currently used, it can be used to check the accuracy of MSF2 but will need set weightings
# The functions below this one will test the accuracy using a variety of weightings and choose the weightings with the best results
def MSF2_accuracy(self):
n = 8
#Gets the macro economic variable codes and names to loop through the individual macro variables
query = "SELECT macroeconcode, macroeconname FROM dbo_macroeconmaster WHERE activecode = 'A'"
data = pd.read_sql_query(query, self.engine)
macrocodes = []
indicators = {}
for i in range(len(data['macroeconcode'])):
macrocodes.append(data['macroeconcode'].loc[i])
d = {data['macroeconcode'].loc[i]: []}
indicators.update(d)
#Gets the instrument ids to loop through the individual instruments
query = 'SELECT instrumentid, instrumentname FROM dbo_instrumentmaster'
data = pd.read_sql_query(query, self.engine)
instrumentids = []
for i in data['instrumentid']:
instrumentids.append(i)
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
#Loops through each instrument id to perform error calculations 1 instrument at a time
for i in instrumentids:
#Gets the instrument statistics to run through the function
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(i, train_date, start_date)
train_data = pd.read_sql_query(query, self.engine)
#Gets the instrument statistics to check against the forecast prices
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentID, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(i, start_date, end_date)
check_data = pd.read_sql_query(query, self.engine)
#Gets the dates for the future forecast prices so they match the instrument statistics
dates = []
for l in check_data['date']:
dates.append(str(l))
#Loops through the macro economic variable codes to calculate percent change
for j in macrocodes:
#Retrieves macro economic statistics for each macro variables
query = "SELECT date, statistics, macroeconcode FROM dbo_macroeconstatistics WHERE macroeconcode = {} AND date <= {}".format('"' + j + '"', start_date)
data = pd.read_sql_query(query, self.engine)
# For loop to retrieve macro statistics and calculate percent change
for k in range(n):
temp = data.tail(n + 1)
data = data.tail(n)
if k == 0:  # the first point needs the older (n+1th) statistic for its percent change
macrov = (data['statistics'].iloc[k] - temp['statistics'].iloc[0]) / temp['statistics'].iloc[0]
indicators[j].append(macrov)
else:
macrov = (data['statistics'].iloc[k] - data['statistics'].iloc[k - 1]) / data['statistics'].iloc[
k - 1]
indicators[j].append(macrov)
#Performs the actual calculations and stores them in an array called calculated_forecast
calculated_forecast = []
for k in range(n):
stat = indicators['GDP'][k] * 1 - (indicators['UR'][k] * 0 + indicators['IR'][k] * .5) - (
indicators['MI'][k] * indicators['MI'][k])
stat = (stat * train_data['close'].iloc[n-1]) + train_data['close'].iloc[n-1]
calculated_forecast.append(stat)
#Creates and inserts the forecast dates, instrument ids, calculated forecast prices, and actual close prices into an array
results = []
for k in range(n):
results.append([dates[k], i, calculated_forecast[k], check_data['close'].loc[k]])
#Creates a dataframe out of the array created above
df = pd.DataFrame(results, columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'close'])
#print(df)
count = 0
# Calculates accuracy
percent_error = []
temp_error = 0
for x in range((len(df) - 1)):
# Check if upward or downward trend
if (df['close'][x + 1] > df['close'][x] and df['forecastcloseprice'][x + 1] > df['forecastcloseprice'][x]) or \
(df['close'][x + 1] < df['close'][x] and df['forecastcloseprice'][x + 1] < df['forecastcloseprice'][x]):
count += 1
temp_error = abs((df['close'][x] - df['forecastcloseprice'][x])) / df['close'][x]
#Percent Error calculation
temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
absolute_percent_error = [abs(ele) for ele in temp_error]
percent_error.append(absolute_percent_error)
if df['instrumentid'][i] == 1:
gm_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
gm_absolute_percent_error = [abs(ele) for ele in gm_temp_error]
#Calculate sum of percent error and find average
gm_average_percent_error = sum(gm_absolute_percent_error) / 8
#print("Average percent error of MSF2 on GM stock is: ", gm_average_percent_error * 100, "%")
if df['instrumentid'][i] == 2:
pfe_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
pfe_absolute_percent_error = [abs(ele) for ele in pfe_temp_error]
#Calculate sum of percent error and find average
pfe_average_percent_error = sum(pfe_absolute_percent_error) / 8
#print("Average percent error of MSF2 on PFE stock is: ", pfe_average_percent_error * 100, "%")
if df['instrumentid'][i] == 3:
spy_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
spy_absolute_percent_error = [abs(ele) for ele in spy_temp_error]
#Calculate sum of percent error and find average
spy_average_percent_error = sum(spy_absolute_percent_error) / 8
#print("Average percent error of MSF2 on S&P 500 stock is: ", spy_average_percent_error * 100, "%")
if df['instrumentid'][i] == 4:
xph_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
xph_absolute_percent_error = [abs(ele) for ele in xph_temp_error]
#Calculate sum of percent error and find average
xph_average_percent_error = sum(xph_absolute_percent_error) / 8
#print("Average percent error of MSF2 on XPH stock is: ", xph_average_percent_error * 100, "%")
if df['instrumentid'][i] == 5:
carz_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
carz_absolute_percent_error = [abs(ele) for ele in carz_temp_error]
#Calculate sum of percent error and find average
carz_average_percent_error = sum(carz_absolute_percent_error) / 8
#print("Average percent error of MSF2 on CARZ index stock is: ", carz_average_percent_error * 100, "%")
if df['instrumentid'][i] == 6:
tyx_temp_error = (df['close'] - df['forecastcloseprice']) / df['close']
tyx_absolute_percent_error = [abs(ele) for ele in tyx_temp_error]
#Calculate sum of percent error and find average
tyx_average_percent_error = sum(tyx_absolute_percent_error) / 8
#print("Average percent error of MSF2 on TYX 30-YR bond is: ", tyx_average_percent_error * 100, "%")
d = len(df)
b = (count / d) * 100
#Prints the trend accuracy
#print('The accuracy for instrument %d: %.2f%%\n' % (i, b))
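# Illustrative sketch (not part of the original class): create_weightings_MSF2
# below searches a 3-D grid of weightings and keeps the combination with the
# lowest average percent error.  The error function here is a made-up stand-in
# for weight_check() so only the search pattern is shown.
def _demo_weight_grid_search():
    import numpy as np
    def fake_error(w, u, i):
        return (w - 1.0) ** 2 + (u + 0.5) ** 2 + (i - 0.25) ** 2
    best_error, best_weights = None, None
    for w in np.arange(-2, 2, 0.25):
        for u in np.arange(-2, 2, 0.25):
            for i in np.arange(-1, 1, 0.25):
                err = fake_error(w, u, i)
                if best_error is None or err < best_error:
                    best_error, best_weights = err, [w, u, i]
    return best_weights   # approximately [1.0, -0.5, 0.25]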
#create_weightings_MSF2 runs the MSF2 algorithm for past dates and compares the results to actual instrument prices, generating a percent error calculation
#We then iterate through several different weightings, compare the percent error for each instrument, and keep the weightings with the lowest percent error
def create_weightings_MSF2(self, setWeightings):
# Query to grab the macroeconcodes and macroeconnames from the macroeconmaster database table
query = "SELECT macroeconcode, macroeconname FROM dbo_macroeconmaster WHERE activecode = 'A'"
data = pd.read_sql_query(query, self.engine)
# Query to grab the instrumentid and instrument name from the instrumentmaster database table
query = 'SELECT instrumentid, instrumentname FROM dbo_instrumentmaster'
data1 = pd.read_sql_query(query, self.engine)
# Keys is a dictionary that will be used to store the macro econ code for each macro econ name
keys = {}
for i in range(len(data)):
keys.update({data['macroeconname'].iloc[i]: data['macroeconcode'].iloc[i]})
# ikeys is a dictionary that will be used to store instrument ids for each instrument name
ikeys = {}
for x in range(len(data1)):
ikeys.update({data1['instrumentname'].iloc[x]: data1['instrumentid'].iloc[x]})
#Vars is a dictionary used to store the macro economic variable percent change for each macro economic code
vars = {}
#Vars is only populated with the relevant macro economic variables (GDP, UR, IR, and MI)
for i in data['macroeconcode']:
if (i == 'GDP' or i == 'UR' or i == 'IR' or i == 'MI'):
d = {i: []}
vars.update(d)
#Weightings is used to store the best weightings for each instrument id which is returned to dataforecast and used for actual prediction
weightings = {}
#n represents the number of datapoints we are working with (represented in quarters)
n = 8
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2018-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2020-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2016-01-01'"
# For loop to loop through the macroeconomic codes to calculate the macro economic variable percent change
for i in keys:
# Check to make sure the macroeconcode we are working with is one of the relevant ones
if keys[i] in vars:
# Query to grab the macroeconomic statistics from the database using the relevant macro economic codes
query = "SELECT date, statistics, macroeconcode FROM dbo_macroeconstatistics WHERE macroeconcode = {} AND date <= {}".format(
'"' + keys[i] + '"', start_date)
data = pd.read_sql_query(query, self.engine)
# For loop to retrieve macro statistics and calculate percent change
for j in range(n):
# This will grab the n+1 statistic to use to calculate the percent change to the n statistic
temp = data.tail(n + 1)
# This will grab the most recent n statistics from the query, as we are working only with n points
data = data.tail(n)
# For the first iteration we need to use the n+1th statistic to calculate percent change on the oldest point
if j == 0:
macrov = (data['statistics'].iloc[j] - temp['statistics'].iloc[0]) / temp['statistics'].iloc[0]
vars[keys[i]].append(macrov)
else:
macrov = (data['statistics'].iloc[j] - data['statistics'].iloc[j - 1]) / \
data['statistics'].iloc[j - 1]
vars[keys[i]].append(macrov)
# If you are not using set weightings then this if statement will run and create the best fit weightings
if not setWeightings:
# We now iterate through the instrument ids
for x in ikeys:
# This query will grab the quarterly instrument statistics from 2016 to 2018
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(ikeys[x], train_date, start_date)
# Then we execute the query and store the returned values in instrumentStats, and grab the last n stats from the dataframe as we are only using n datapoints
instrumentStats = pd.read_sql_query(query, self.engine)
instrumentStats = instrumentStats.tail(n)
#Best weightings will be used to store the best weightings for each instrument
best_weightings = [0, 0, 0]
#Best avg error will be used to store the best average percent error for each instrument
best_avg_error = -1
#Best trend error will be used to store the best trend error for each instrument
best_trend_error = -1
#Best forecast prices will be used to store the forecast prices for the best weightings to store them in a database for visual comparison later
best_forecast_prices = []
# We now iterate through every combination of the 3 weighting ranges
for weight in numpy.arange(-5.7, 2.8, .25):
for uweight in numpy.arange(-3.7, 3.6, .25):
for iweight in numpy.arange(-.8, .9, .25):
# We initialize a list to store the resulting forecasted prices to compare in another function
stat_check = []
# isFirst will determine whether or not this is the first calculation being done
# If it is true then we use the most recent instrument statistic to forecast the first pricepoint
# IF it is false then we use the previous forecast price to predict the next forecast price
isFirst = True
# This is the actual calculation of MSF3 where we store the result in stat_check to compare to actual instrument prices
for i in range(n):
if isFirst:
#Change to pluses and test accuracy
stat = vars['GDP'][i] * weight - vars['UR'][i] * uweight + vars['IR'][i] * iweight - (
vars['MI'][i] * vars['MI'][i])
stat = (stat * instrumentStats['close'].iloc[n-1]) + instrumentStats['close'].iloc[n-1]
stat_check.append(stat)
temp_price = stat
isFirst = False
else:
stat = vars['GDP'][i] * weight - (vars['UR'][i] * uweight + vars['IR'][i] * iweight) - (
vars['MI'][i] * vars['MI'][i])
stat = (stat * temp_price) + temp_price
stat_check.append(stat)
temp_price = stat
# We call to the weight check function using the list of forecasted prices, the current instrument id, the amount of datapoints we are working with, and the name of the function we are testing
# It then returns the average percent error and trend error for the forecasted prices, as well as the dates we are forecasting for so we can insert them into the visualize table
temp_avg_error, temp_trend_error, dates = weight_check(DBEngine().mysql_engine(), stat_check, ikeys[x], n, 'MSF2', start_date, end_date)
# Check to see if the best_avg_error has been initialized to a valid average percent error, if not then no average error or trend error has been calculated yet
if (best_avg_error < 0):
# If so store the average percent error, the best weightings, best trend error, and the resulting forecasted prices for comparison with other weightings
best_avg_error = temp_avg_error
best_weightings = [weight, uweight, iweight]
best_trend_error = temp_trend_error
best_forecast_prices = stat_check
# Otherwise check whether the stored best average percent error is worse than the newly calculated one
elif (best_avg_error > temp_avg_error):
# And if so set the values for all the relevant variables
best_avg_error = temp_avg_error
best_weightings = [weight, uweight, iweight]
best_trend_error = temp_trend_error
best_forecast_prices = stat_check
# Print statements to view the average percent error, trend error, and best weightings
print("The lowest avg percent error is %.7f%% for instrumentID %d" % (best_avg_error * 100, ikeys[x]), ' for function: MSF2')
print("The weightings are: ", best_weightings, ' for function: MSF2')
print('The trend accuracy is: ', best_trend_error)
# initializes weightings dictionary as the best weightings found for each instrument id
weightings[ikeys[x]] = best_weightings
# visual_comparisons will be used to store the past forecasted prices so we can visualize them compared to actual instrument prices on a graph
visual_comparisons = []
for k in range(n):
visual_comparisons.append([dates[k], ikeys[x], best_forecast_prices[k], 'MSF2'])
df1 = pd.DataFrame(visual_comparisons, columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'algorithmcode'])
df1.to_sql('dbo_tempvisualize', self.engine,
if_exists=('append'), index=False)
# The weightings for each instrument ID are returned to dataforecast and used for prediction
return weightings
# This else statement will make use of the preset weightings for prediction and comparison
else:
# These are the set weightings as of 4/14/2020; they may not be relevant in the future. Feel free to change them
weightings = {1: [-2.2, 3.3, 0.44999999999999996],
2: [1.0499999999999998, -3.2, -0.8],
3: [2.55, 3.3, 0.7],
4: [0.04999999999999982, 3.05, 0.7],
5: [-4.7, 3.3, 0.44999999999999996],
6: [-1.2000000000000002, -3.7, -0.8]}
# We now iterate through the instrument ids
for x in ikeys:
# This query will grab the quarterly instrument statistics from 2016 to 2018
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_instrumentstatistics WHERE instrumentid = {} AND date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(ikeys[x], train_date, start_date)
# Then we execute the query and store the returned values in instrumentStats, and grab the last n stats from the dataframe as we are only using n datapoints
instrumentStats = pd.read_sql_query(query, self.engine)
instrumentStats = instrumentStats.tail(n)
# Best weightings will be used to store the best weightings for each instrument
best_weightings = weightings[ikeys[x]]
# avg error will be used to store the best average percent error for each instrument
avg_error = 0
# trend error will be used to store the best trend error for each instrument
trend_error = 0
# Best forecast prices will be used to store the forecast prices for the best weightings to store them in a database for visual comparison later
best_forecast_prices = []
# We initialize a list to store the resulting forecasted prices to compare in another function
stat_check = []
# isFirst will determine whether or not this is the first calculation being done
# If it is true then we use the most recent instrument statistic to forecast the first pricepoint
# IF it is false then we use the previous forecast price to predict the next forecast price
isFirst = True
# This is the actual calculation of MSF3 where we store the result in stat_check to compare to actual instrument prices
for i in range(n):
if isFirst:
# Change to pluses and test accuracy
stat = vars['GDP'][i] * best_weightings[0] - vars['UR'][i] * best_weightings[1] + vars['IR'][i] * best_weightings[2] - (
vars['MI'][i] * vars['MI'][i])
stat = (stat * instrumentStats['close'].iloc[n - 1]) + instrumentStats['close'].iloc[
n - 1]
stat_check.append(stat)
temp_price = stat
isFirst = False
else:
stat = vars['GDP'][i] * best_weightings[0] - (vars['UR'][i] * best_weightings[1] + vars['IR'][i] * best_weightings[2]) - (
vars['MI'][i] * vars['MI'][i])
stat = (stat * temp_price) + temp_price
stat_check.append(stat)
temp_price = stat
# We call to the weight check function using the list of forecasted prices, the current instrument id, the amount of datapoints we are working with, and the name of the function we are testing
# It then returns the average percent error and trend error for the forecasted prices, as well as the dates we are forecasting for so we can insert them into the visualize table
avg_error, trend_error, dates = weight_check(DBEngine().mysql_engine(), stat_check,
ikeys[x], n, 'MSF2', start_date,
end_date)
# Print statements to view the average percent error, trend error, and best weightings
print("The lowest avg percent error is %.7f%% for instrumentID %d" % (avg_error * 100, ikeys[x]),
' for function: MSF2')
print("The weightings are: ", best_weightings, ' for function: MSF2')
print('The trend accuracy is: ', trend_error)
# visual_comparisons will be used to store the past forecasted prices so we can visualize them compared to actual instrument prices on a graph
visual_comparisons = []
for k in range(n):
visual_comparisons.append([dates[k], ikeys[x], stat_check[k], 'MSF2'])
df1 = pd.DataFrame(visual_comparisons,
columns=['forecastdate', 'instrumentid', 'forecastcloseprice', 'algorithmcode'])
df1.to_sql('dbo_tempvisualize', self.engine,
if_exists=('append'), index=False)
# The weightings for each instrument ID are returned to dataforecast and used for prediction
return weightings
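# Illustrative sketch (not part of the original class): once a weighting triple
# is chosen, each forecast step scales the previous price by the weighted macro
# percent changes, mirroring the loops above.  All numbers are hypothetical.
def _demo_forecast_step():
    gdp, ur, ir, mi = 0.012, -0.004, 0.002, 0.01   # quarterly percent changes
    w_gdp, w_ur, w_ir = 2.55, 3.3, 0.7             # an example weighting triple
    last_price = 280.0
    stat = gdp * w_gdp - (ur * w_ur + ir * w_ir) - (mi * mi)
    return (stat * last_price) + last_price        # the next forecasted price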
#create_weightings_MSF2_Past_Dates runs the MSF2 algorithm for SPY over the recession period and compares the results to actual instrument prices, generating a percent error calculation
#We then iterate through several different weightings, compare the percent error for each combination, and keep the weightings with the lowest percent error
def create_weightings_MSF2_Past_Dates(self):
toReplace = False
# Query to grab the macroeconcodes and macroeconnames from the macroeconmaster database table
query = "SELECT macroeconcode, macroeconname FROM dbo_macroeconmaster WHERE activecode = 'A'"
data = pd.read_sql_query(query, self.engine)
# Query to grab the instrumentid and instrument name from the instrumentmaster database table
query = 'SELECT instrumentid, instrumentname FROM dbo_instrumentmaster WHERE instrumentid = 3'
data1 = pd.read_sql_query(query, self.engine)
# Keys is a dictionary that will be used to store the macro econ code for each macro econ name
keys = {}
for i in range(len(data)):
keys.update({data['macroeconname'].iloc[i]: data['macroeconcode'].iloc[i]})
# ikeys is a dictionary that will be used to store instrument ids for each instrument name
ikeys = {}
for x in range(len(data1)):
ikeys.update({data1['instrumentname'].iloc[x]: data1['instrumentid'].iloc[x]})
#Vars is a dictionary used to store the macro economic variable percent change for each macro economic code
vars = {}
#Vars is only populated with the relevant macro economic variables (GDP, UR, IR, and MI)
for i in data['macroeconcode']:
if (i == 'GDP' or i == 'UR' or i == 'IR' or i == 'MI'):
d = {i: []}
vars.update(d)
#Weightings is used to store the best weightings for each instrument id which is returned to dataforecast and used for actual prediction
weightings = {}
#n represents the number of datapoints we are working with (represented in quarters)
n = 8
# These are the date ranges we are working with
# start_date represents the starting date for the forecasts and the end of the training dates
start_date = "'2007-01-01'"
# end_date represents the date for which the forecasting ends
end_date = "'2009-01-01'"
# train_date represents the date we start collecting the instrument statistics used to forecast prices
train_date = "'2005-01-01'"
#For loop to loop through the macroeconomic codes to calculate the macro economic variable percent change
for i in keys:
#Check to make sure the macroeconcode we are working with is one of the relevant ones
if keys[i] in vars:
#Query to grab the macroeconomic statistics from the database using the relevant macro economic codes
query = "SELECT date, statistics, macroeconcode FROM dbo_macroeconstatistics WHERE macroeconcode = {} AND date <= {}".format('"' + keys[i] + '"', start_date)
data = pd.read_sql_query(query, self.engine)
# For loop to retrieve macro statistics and calculate percent change
for j in range(n):
#This will grab the n+1 statistic to use to calculate the percent change to the n statistic
temp = data.tail(n + 1)
#This will grab the most recent n statistics from the query, as we are working only with n points
data = data.tail(n)
#For the first iteration we need to use the n+1th statistic to calculate percent change on the oldest point
if j == 0:
macrov = (data['statistics'].iloc[j] - temp['statistics'].iloc[0]) / temp['statistics'].iloc[0]
vars[keys[i]].append(macrov)
else:
macrov = (data['statistics'].iloc[j] - data['statistics'].iloc[j - 1]) / \
data['statistics'].iloc[j - 1]
vars[keys[i]].append(macrov)
# This query will grab the quarterly instrument statistics from the train date to the start date
query = "SELECT date, close, instrumentid FROM ( SELECT date, close, instrumentid, ROW_NUMBER() OVER " \
"(PARTITION BY YEAR(date), MONTH(date) ORDER BY DAY(date) DESC) AS rowNum FROM " \
"dbo_paststatistics WHERE date BETWEEN {} AND {} ) z " \
"WHERE rowNum = 1 AND ( MONTH(z.date) = 3 OR MONTH(z.date) = 6 OR MONTH(z.date) = 9 OR " \
"MONTH(z.date) = 12)".format(train_date, start_date)
# Then we execute the query and store the returned values in instrumentStats, and grab the last n stats from the dataframe as we are only using n datapoints
instrumentStats = pd.read_sql_query(query, self.engine)
instrumentStats = instrumentStats.tail(n)
#Best weightings will be used to store the best weightings for each instrument
best_weightings = [0, 0, 0]
#Best avg error will be used to store the best average percent error for each instrument
best_avg_error = -1
#Best trend error will be used to store the best trend error for each instrument
best_trend_error = -1
#Best forecast prices will be used to store the forecast prices for the best weightings to store them in a database for visual comparison later
best_forecast_prices = []
# We now iterate through every combination of the 3 weighting ranges
for weight in numpy.arange(-3, 3, .25):
for uweight in numpy.arange(-3, 3, .25):
for iweight in numpy.arange(-3, .3, .25):
# We initialize a list to store the resulting forecasted prices to compare in another function
stat_check = []
isFirst = True
# This is the actual calculation of MSF3 where we store the result in stat_check to compare to actual instrument prices
for i in range(n):
if isFirst:
#Change to pluses and test accuracy
stat = vars['GDP'][i] * weight - vars['UR'][i] * uweight + vars['IR'][i] * iweight - (
vars['MI'][i] * vars['MI'][i])
stat = (stat * instrumentStats['close'].iloc[n-1]) + instrumentStats['close'].iloc[n-1]
stat_check.append(stat)
temp_price = stat
isFirst = False
else:
stat = vars['GDP'][i] * weight - (vars['UR'][i] * uweight + vars['IR'][i] * iweight) - (
vars['MI'][i] * vars['MI'][i])
stat = (stat * temp_price) + temp_price
stat_check.append(stat)
temp_price = stat
# We call to the weight check function using the list of forecasted prices, the current instrument id, the amount of datapoints we are working with, and the name of the function we are testing
# It then returns the average percent error and trend error for the forecasted prices, as well as the dates we are forecasting for so we can insert them into the visualize table
temp_avg_error, temp_trend_error, dates = weight_check(DBEngine().mysql_engine(), stat_check, 3, n, 'past', start_date, end_date)
# Check to see if the best_avg_error has been initialized to a valid average percent error, if not then no average error or trend error has been calculated yet
if (best_avg_error < 0):
# If so store the average percent error, the best weightings, best trend error, and the resulting forecasted prices for comparison with other weightings
best_avg_error = temp_avg_error
best_weightings = [weight, uweight, iweight]
best_trend_error = temp_trend_error
best_forecast_prices = stat_check
# Otherwise check whether the stored best average percent error is worse than the newly calculated one
elif (best_avg_error > temp_avg_error):
# And if so set the values for all the relevant variables
best_avg_error = temp_avg_error
best_weightings = [weight, uweight, iweight]
best_trend_error = temp_trend_error
best_forecast_prices = stat_check
# Print statements to view the average percent error, trend error, and best weightings
print("The lowest avg percent error is %.7f%% for instrumentID %d" % (best_avg_error * 100, 3), ' for function: MSF2 Past Dates')
print("The weightings are: ", best_weightings, ' for function: MSF2 Past Dates')
print('The trend accuracy is: ', best_trend_error)
# initializes weightings dictionary as the best weightings found for each instrument id
weightings[3] = best_weightings
# visual_comparisons will be used to store the past forecasted prices so we can visualize them compared to actual instrument prices on a graph
visual_comparisons = []
for k in range(n):
visual_comparisons.append([dates[k], 3, best_forecast_prices[k], 'MSF2 Past Dates'])
query = 'SELECT algorithmcode FROM dbo_tempvisualize'
setWeightingsCheck = pd.read_sql_query(query, self.engine)
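# Illustrative sketch (not part of the original class): the visualisation
# tables above are rebuilt with a replace-then-append pattern, so only the
# first write clears old rows.  The in-memory SQLite engine and table name are
# assumptions for this demo.
def _demo_replace_then_append():
    import pandas as pd
    import sqlalchemy as sal
    engine = sal.create_engine('sqlite://')
    to_append = False
    for chunk in ([{'id': 1}], [{'id': 2}]):
        pd.DataFrame(chunk).to_sql('demo_visualize', engine,
                                   if_exists='append' if to_append else 'replace',
                                   index=False)
        to_append = True
    return pd.read_sql_query('SELECT COUNT(*) AS n FROM demo_visualize', engine)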
"""
To extract compile time and runtime data from evo-suite dataset
Version 0.3.0
- Project metric computation has been omitted.
To be used in CodART project
"""
import multiprocessing
import sys
import os
import subprocess
import threading
from collections import Counter
from functools import wraps
import warnings
from deprecated import deprecated
import re
import math
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy import stats
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, f_classif
from sklearn.decomposition import PCA
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from sklearn.neighbors import LocalOutlierFactor
from sklearn.ensemble import IsolationForest
from imblearn.combine import SMOTEENN, SMOTETomek
from imblearn.over_sampling import SMOTE, ADASYN
# https://scitools.com/support/python-api/
# Python 3.8 and newer require the user to add a call to os.add_dll_directory("SciTools/bin/")
# os.add_dll_directory('C:/Program Files/SciTools/bin/pc-win64')
sys.path.insert(0, 'D:/program files/scitools/bin/pc-win64/python')
try:
import understand
except ModuleNotFoundError:
# Error handling
pass
from . import metrics_names
from naming import UnderstandUtility
from metrics.metrics_jcode_odor import JCodeOdorMetric
from metrics.source_code_metrics import *
import metrics.metrics_names
__version__ = '0.4.0'
__author__ = 'Morteza'
def check_compute_metrics_by_class_list(project_name: str = None, database=None, class_list=None,
csv_path=None):
class_entities = PreProcess.read_project_classes(project_name=project_name, db=database, df=class_list, )
print('Number of classes in {0}: {1}'.format(project_name, len(class_entities)))
columns = ['Project', 'NumberOfClass']
columns.extend(TestabilityMetrics.get_all_metrics_names())
dummy_data = [0 for i in range(0, len(columns) - 2)]
dummy_data.insert(0, project_name)
dummy_data.insert(1, len(class_entities))
df = pd.DataFrame(data=[dummy_data], columns=columns)
# print(df)
# print(columns)
df.to_csv(csv_path + project_name + '.csv', index=False, )
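# Illustrative sketch (not part of the original module): compute_java_class_metrics2
# in the class below repeatedly reduces a per-method metric list to
# Sum/Max/Min/Avg/SD/Log class-level values.  The list here is a made-up set of
# per-method cyclomatic complexities.
def _demo_summarise_metric(values=(1, 3, 2, 5)):
    import math
    import numpy as np
    values = list(values)
    return {'Sum': sum(values),
            'Max': max(values),
            'Min': min(values),
            'Avg': sum(values) / len(values),
            'SD': float(np.std(values)),
            'Log': math.log10(sum(values) + 1)}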
class TestabilityMetrics:
"""
"""
@classmethod
def get_class_ordinary_metrics_names(cls) -> list:
return metrics_names.class_ordinary_metrics_names
@classmethod
def get_class_lexicon_metrics_names(cls) -> list:
return metrics_names.class_lexicon_metrics_names
@classmethod
def get_package_metrics_names(cls) -> list:
return metrics_names.package_metrics_names
@classmethod
def get_project_metrics_names(cls) -> list:
return metrics_names.project_metrics_names
@classmethod
def get_all_metrics_names(cls) -> list:
metrics = list()
# print('project_metrics number: ', len(TestabilityMetrics.get_project_metrics_names()))
# for metric_name in TestabilityMetrics.get_project_metrics_names():
# metrics.append('PJ_' + metric_name)
# print('package_metrics number: ', len(TestabilityMetrics.get_package_metrics_names()))
for metric_name in TestabilityMetrics.get_package_metrics_names():
metrics.append('PK_' + metric_name)
# SOOTI is now corrected.
# print('class_lexicon_metrics number: ', len(TestabilityMetrics.get_class_lexicon_metrics_names()))
for metric_name in TestabilityMetrics.get_class_lexicon_metrics_names():
metrics.append('CSLEX_' + metric_name)
# print('class_ordinary_metrics number: ', len(TestabilityMetrics.get_class_ordinary_metrics_names()))
for metric_name in TestabilityMetrics.get_class_ordinary_metrics_names():
metrics.append('CSORD_' + metric_name)
# print('All available metrics: {0}'.format(len(metrics)))
return metrics
@classmethod
def get_all_primary_metrics_names(cls) -> list:
primary_metrics_names = list()
for metric_name in metrics_names.project_metrics_names_primary:
primary_metrics_names.append('PJ_' + metric_name)
for metric_name in metrics_names.package_metrics_names_primary:
primary_metrics_names.append('PK_' + metric_name)
for metric_name in metrics_names.class_ordinary_metrics_names_primary:
primary_metrics_names.append('CSORD_' + metric_name)
for metric_name in metrics_names.class_lexicon_metrics_names:
primary_metrics_names.append('CSLEX_' + metric_name)
return primary_metrics_names
@classmethod
def compute_java_class_metrics2(cls, db=None, entity=None):
"""
Strategy #2: Take a list of all classes and search for target class
Which strategy is used for our final setting? I do not know!
:param db:
:param entity:
:return:
"""
# 1. Understand built-in class metrics
class_metrics = entity.metric(entity.metrics())
# print('number of metrics for class "{0}": {1}, and metrics: {2}'.format(entity.longname(),
# len(class_metrics), class_metrics), )
# for i, metric in enumerate(class_metrics.keys()):
# print(i + 1, ': ', metric, class_metrics[metric])
# print(class_metrics['AvgCyclomatic'])
# 2. Systematically created metrics
j_code_odor_metric = JCodeOdorMetric()
method_list = UnderstandUtility.get_method_of_class_java2(db=db, class_name=entity.longname())
if method_list is None:
raise TypeError('method_list is none for class "{}"'.format(entity.longname()))
# 2.1 CSCC
class_cyclomatic_list = list()
class_cyclomatic_namm_list = list()
class_cyclomatic_strict_list = list()
class_cyclomatic_strict_namm_list = list()
class_cyclomatic_modified_list = list()
class_cyclomatic_modified_namm_list = list()
class_essential_list = list()
class_essential_namm_list = list()
for method in method_list:
class_cyclomatic_list.append(method.metric(['Cyclomatic'])['Cyclomatic'])
class_cyclomatic_strict_list.append(method.metric(['CyclomaticStrict'])['CyclomaticStrict'])
class_cyclomatic_modified_list.append(method.metric(['CyclomaticModified'])['CyclomaticModified'])
class_essential_list.append(method.metric(['Essential'])['Essential'])
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
class_cyclomatic_namm_list.append(method.metric(['Cyclomatic'])['Cyclomatic'])
class_cyclomatic_strict_namm_list.append(method.metric(['CyclomaticStrict'])['CyclomaticStrict'])
class_cyclomatic_modified_namm_list.append(method.metric(['CyclomaticModified'])['CyclomaticModified'])
class_essential_namm_list.append(method.metric(['Essential'])['Essential'])
cls.remove_none_from_lists([class_cyclomatic_list, class_cyclomatic_namm_list,
class_cyclomatic_strict_list, class_cyclomatic_strict_namm_list,
class_cyclomatic_modified_list, class_cyclomatic_modified_namm_list,
class_essential_list, class_essential_namm_list])
# CSCC
# 2.1.13
class_metrics.update({'MinCyclomatic': min(class_cyclomatic_list)})
# 2.1.14
class_metrics.update({'MinCyclomaticStrict': min(class_cyclomatic_strict_list)})
# 2.1.15
class_metrics.update({'MinCyclomaticModified': min(class_cyclomatic_modified_list)})
# 2.1.16
class_metrics.update({'MinEssential': min(class_essential_list)})
# 2.1.17
class_metrics.update({'SDCyclomatic': np.std(class_cyclomatic_list)})
# 2.1.18
class_metrics.update({'SDCyclomaticStrict': np.std(class_cyclomatic_strict_list)})
# 2.1.19
class_metrics.update({'SDCyclomaticModified': np.std(class_cyclomatic_modified_list)})
# 2.1.20
class_metrics.update({'SDEssential': np.std(class_essential_list)})
class_metrics.update({'LogCyclomatic': math.log10(sum(class_cyclomatic_list) + 1)})
class_metrics.update({'LogCyclomaticStrict': math.log10(sum(class_cyclomatic_strict_list) + 1)})
class_metrics.update({'LogCyclomaticModified': math.log10(sum(class_cyclomatic_modified_list) + 1)})
class_metrics.update({'LogEssential': math.log10(sum(class_essential_list) + 1)})
# CSCCNAMM
# 2.1.21
class_metrics.update({'SumCyclomaticNAMM': sum(class_cyclomatic_namm_list)})
# 2.1.22
class_metrics.update({'SumCyclomaticStrictNAMM': sum(class_cyclomatic_strict_namm_list)})
# 2.1.23
class_metrics.update({'SumCyclomaticModifiedNAMM': sum(class_cyclomatic_modified_namm_list)})
# 2.1.24
class_metrics.update({'SumEssentialNAMM': sum(class_essential_namm_list)})
# 2.1.25
class_metrics.update({'MaxCyclomaticNAMM': max(class_cyclomatic_namm_list)})
# 2.1.26
class_metrics.update({'MaxCyclomaticStrictNAMM': max(class_cyclomatic_strict_namm_list)})
# 2.1.27
class_metrics.update({'MaxCyclomaticModifiedNAMM': max(class_cyclomatic_modified_namm_list)})
# 2.1.28
class_metrics.update({'MaxEssentialNAMM': max(class_essential_namm_list)})
# 2.1.29
class_metrics.update({'AvgCyclomaticNAMM': sum(class_cyclomatic_namm_list) / len(class_cyclomatic_namm_list)})
# 2.1.30
class_metrics.update({'AvgCyclomaticStrictNAMM': sum(class_cyclomatic_strict_namm_list) / len(
class_cyclomatic_strict_namm_list)})
# 2.1.31
class_metrics.update({'AvgCyclomaticModifiedNAMM': sum(class_cyclomatic_modified_namm_list) / len(
class_cyclomatic_modified_namm_list)})
# 2.1.32
class_metrics.update({'AvgEssentialNAMM': sum(class_essential_namm_list) / len(class_essential_namm_list)})
# 2.1.33
class_metrics.update({'MinCyclomaticNAMM': min(class_cyclomatic_namm_list)})
# 2.1.34
class_metrics.update({'MinCyclomaticStrictNAMM': min(class_cyclomatic_strict_namm_list)})
# 2.1.35
class_metrics.update({'MinCyclomaticModifiedNAMM': min(class_cyclomatic_modified_namm_list)})
# 2.1.36
class_metrics.update({'MinEssentialNAMM': min(class_essential_namm_list)})
# 2.1.37
class_metrics.update({'SDCyclomaticNAMM': np.std(class_cyclomatic_namm_list)})
# 2.1.38
class_metrics.update({'SDCyclomaticStrictNAMM': np.std(class_cyclomatic_strict_namm_list)})
# 2.1.39
class_metrics.update({'SDCyclomaticModifiedNAMM': np.std(class_cyclomatic_modified_namm_list)})
# 2.1.40
class_metrics.update({'SDEssentialNAMM': np.std(class_essential_namm_list)})
# 2.2 CSNOP (10)
#
parameters_length_list = list()
parameters_length_namm_list = list()
# number_of_parameters = 0
# print('method list', len(method_list))
for method in method_list:
# if method.library() != "Standard":
# print('method params', method.longname(), '-->', method.parameters())
params = method.parameters().split(',')
if len(params) == 1:
if params[0] == ' ' or params[0] == '' or params[0] is None:
parameters_length_list.append(0)
else:
parameters_length_list.append(1)
else:
parameters_length_list.append(len(params))
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
if len(params) == 1:
if params[0] == ' ' or params[0] == '' or params[0] is None:
parameters_length_namm_list.append(0)
else:
parameters_length_namm_list.append(1)
else:
parameters_length_namm_list.append(len(params))
cls.remove_none_from_lists([parameters_length_list, parameters_length_namm_list])
# print('number of parameters', number_of_parameters)
# CSNOP
# 2.2.1
class_metrics.update({'SumCSNOP': sum(parameters_length_list)})
# 2.2.2
class_metrics.update({'MaxCSNOP': max(parameters_length_list)})
# 2.2.3
class_metrics.update({'MinCSNOP': min(parameters_length_list)})
# 2.2.4
class_metrics.update({'AvgCSNOP': sum(parameters_length_list) / len(parameters_length_list)})
# 2.2.5
class_metrics.update({'SDCSNOP': np.std(parameters_length_list)})
# CSNOP_NAMM
# 2.2.6
class_metrics.update({'SumCSNOPNAMM': sum(parameters_length_namm_list)})
# 2.2.7
class_metrics.update({'MaxCSNOPNAMM': max(parameters_length_namm_list)})
# 2.2.8
class_metrics.update({'MinCSNOPNAMM': min(parameters_length_namm_list)})
# 2.2.9
class_metrics.update({'AvgCSNOPNAMM': sum(parameters_length_namm_list) / len(parameters_length_namm_list)})
# 2.2.10
class_metrics.update({'SDCSNOPNAMM': np.std(parameters_length_namm_list)})
# 2.3 SCLOC (30)
#
line_of_code_list = list()
line_of_code_namm_list = list()
line_of_code_decl_list = list()
line_of_code_decl_namm_list = list()
line_of_code_exe_list = list()
line_of_code_exe_namm_list = list()
for method in method_list:
line_of_code_list.append(method.metric(['CountLineCode'])['CountLineCode'])
line_of_code_decl_list.append(method.metric(['CountLineCodeDecl'])['CountLineCodeDecl'])
line_of_code_exe_list.append(method.metric(['CountLineCodeExe'])['CountLineCodeExe'])
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
line_of_code_namm_list.append(method.metric(['CountLineCode'])['CountLineCode'])
line_of_code_decl_namm_list.append(method.metric(['CountLineCodeDecl'])['CountLineCodeDecl'])
line_of_code_exe_namm_list.append(method.metric(['CountLineCodeExe'])['CountLineCodeExe'])
cls.remove_none_from_lists([line_of_code_list, line_of_code_namm_list,
line_of_code_decl_list, line_of_code_decl_namm_list,
line_of_code_exe_list, line_of_code_exe_namm_list])
# CSLOC_All
# 2.3.5
class_metrics.update({'AvgLineCodeDecl': sum(line_of_code_decl_list) / len(line_of_code_decl_list)})
# 2.3.6
class_metrics.update({'AvgLineCodeExe': sum(line_of_code_exe_list) / len(line_of_code_exe_list)})
# 2.3.7
class_metrics.update({'MaxLineCode': max(line_of_code_list)})
# 2.3.8
class_metrics.update({'MaxLineCodeDecl': max(line_of_code_decl_list)})
# 2.3.9
class_metrics.update({'MaxLineCodeExe': max(line_of_code_exe_list)})
# 2.3.10
class_metrics.update({'MinLineCode': min(line_of_code_list)})
# 2.3.11
class_metrics.update({'MinLineCodeDecl': min(line_of_code_decl_list)})
# 2.3.12
class_metrics.update({'MinLineCodeExe': min(line_of_code_exe_list)})
# 2.3.13
class_metrics.update({'SDLineCode': np.std(line_of_code_list)})
# 2.3.14
class_metrics.update({'SDLineCodeDecl': np.std(line_of_code_decl_list)})
# 2.3.15
class_metrics.update({'SDLineCodeExe': np.std(line_of_code_exe_list)})
class_metrics.update({'LogLineCode': math.log10(sum(line_of_code_list) + 1)})
class_metrics.update({'LogLineCodeDecl': math.log10(sum(line_of_code_decl_list) + 1)})
class_metrics.update({'LogLineCodeExe': math.log10(sum(line_of_code_exe_list) + 1)})
# CSLOC_NAMM
# 2.3.16
class_metrics.update({'CountLineCodeNAMM': sum(line_of_code_namm_list)})
# 2.3.17
class_metrics.update({'CountLineCodeDeclNAMM': sum(line_of_code_decl_namm_list)})
# print('!@#', sum(line_of_code_decl_namm_list))
# quit()
# 2.3.18
class_metrics.update({'CountLineCodeExeNAMM': sum(line_of_code_exe_namm_list)})
# 2.3.19
class_metrics.update({'AvgLineCodeNAMM': sum(line_of_code_namm_list) / len(line_of_code_namm_list)})
# 2.3.20
class_metrics.update(
{'AvgLineCodeDeclNAMM': sum(line_of_code_decl_namm_list) / len(line_of_code_decl_namm_list)})
# 2.3.21
class_metrics.update({'AvgLineCodeExeNAMM': sum(line_of_code_exe_namm_list) / len(line_of_code_exe_namm_list)})
# 2.3.22
class_metrics.update({'MaxLineCodeNAMM': max(line_of_code_namm_list)})
# 2.3.23
class_metrics.update({'MaxLineCodeDeclNAMM': max(line_of_code_decl_namm_list)})
# 2.3.24
class_metrics.update({'MaxLineCodeExeNAMM': max(line_of_code_exe_namm_list)})
# 2.3.25
class_metrics.update({'MinLineCodeNAMM': min(line_of_code_namm_list)})
# 2.3.26
class_metrics.update({'MinLineCodeDeclNAMM': min(line_of_code_decl_namm_list)})
# 2.3.27
class_metrics.update({'MinLineCodeExeNAMM': min(line_of_code_exe_namm_list)})
# 2.3.28
class_metrics.update({'SDLineCodeNAMM': np.std(line_of_code_namm_list)})
# 2.3.29
class_metrics.update({'SDLineCodeDeclNAMM': np.std(line_of_code_decl_namm_list)})
# print('!@#', np.std(line_of_code_decl_namm_list))
# quit()
# 2.3.30
class_metrics.update({'SDLineCodeExeNAMM': np.std(line_of_code_exe_namm_list)})
# ----------------------------------------------------------------
# 2.4 CSNOST (3-->30)
# To be completed in future work
number_of_stmt_list = list()
number_of_stmt_namm_list = list()
number_of_stmt_decl_list = list()
number_of_stmt_decl_namm_list = list()
number_of_stmt_exe_list = list()
number_of_stmt_exe_namm_list = list()
for method in method_list:
number_of_stmt_list.append(method.metric(['CountStmt'])['CountStmt'])
number_of_stmt_decl_list.append(method.metric(['CountStmtDecl'])['CountStmtDecl'])
number_of_stmt_exe_list.append(method.metric(['CountStmtExe'])['CountStmtExe'])
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
number_of_stmt_namm_list.append(method.metric(['CountStmt'])['CountStmt'])
number_of_stmt_decl_namm_list.append(method.metric(['CountStmtDecl'])['CountStmtDecl'])
number_of_stmt_exe_namm_list.append(method.metric(['CountStmtExe'])['CountStmtExe'])
cls.remove_none_from_lists([number_of_stmt_list, number_of_stmt_namm_list,
number_of_stmt_decl_list, number_of_stmt_decl_namm_list,
number_of_stmt_exe_list, number_of_stmt_exe_namm_list])
# CSNOST_All
# 2.4.4
class_metrics.update({'AvgStmt': sum(number_of_stmt_list) / len(number_of_stmt_list)})
# 2.4.5
class_metrics.update({'AvgStmtDecl': sum(number_of_stmt_decl_list) / len(number_of_stmt_decl_list)})
# 2.4.6
class_metrics.update({'AvgStmtExe': sum(number_of_stmt_exe_list) / len(number_of_stmt_exe_list)})
# 2.4.7
class_metrics.update({'MaxStmt': max(number_of_stmt_list)})
# 2.4.8
class_metrics.update({'MaxStmtDecl': max(number_of_stmt_decl_list)})
# 2.4.9
class_metrics.update({'MaxStmtExe': max(number_of_stmt_exe_list)})
# 2.4.10
class_metrics.update({'MinStmt': min(number_of_stmt_list)})
# 2.4.11
class_metrics.update({'MinStmtDecl': min(number_of_stmt_decl_list)})
# 2.4.12
class_metrics.update({'MinStmtExe': min(number_of_stmt_exe_list)})
# 2.4.13
class_metrics.update({'SDStmt': np.std(number_of_stmt_list)})
# 2.4.14
class_metrics.update({'SDStmtDecl': np.std(number_of_stmt_decl_list)})
# 2.4.15
class_metrics.update({'SDStmtExe': np.std(number_of_stmt_exe_list)})
class_metrics.update({'LogStmt': math.log10(sum(number_of_stmt_list) + 1)})
class_metrics.update({'LogStmtDecl': math.log10(sum(number_of_stmt_decl_list) + 1)})
class_metrics.update({'LogStmtExe': math.log10(sum(number_of_stmt_exe_list) + 1)})
# CSNOST_NAMM
# 2.4.16
class_metrics.update({'CountStmtNAMM': sum(number_of_stmt_namm_list)})
# 2.4.17
class_metrics.update({'CountStmtDeclNAMM': sum(number_of_stmt_decl_namm_list)})
# 2.4.18
class_metrics.update({'CountStmtExeNAMM': sum(number_of_stmt_exe_namm_list)})
# 2.4.19
class_metrics.update({'AvgStmtNAMM': sum(number_of_stmt_namm_list) / len(number_of_stmt_namm_list)})
# 2.4.20
class_metrics.update(
{'AvgStmtDeclNAMM': sum(number_of_stmt_decl_namm_list) / len(number_of_stmt_decl_namm_list)})
# 2.4.21
class_metrics.update({'AvgStmtExeNAMM': sum(number_of_stmt_exe_namm_list) / len(number_of_stmt_exe_namm_list)})
# 2.4.22
class_metrics.update({'MaxStmtNAMM': max(number_of_stmt_namm_list)})
# 2.4.23
class_metrics.update({'MaxStmtDeclNAMM': max(number_of_stmt_decl_namm_list)})
# 2.4.24
class_metrics.update({'MaxStmtExeNAMM': max(number_of_stmt_exe_namm_list)})
# 2.4.25
class_metrics.update({'MinStmtNAMM': min(number_of_stmt_namm_list)})
# 2.4.26
class_metrics.update({'MinStmtDeclNAMM': min(number_of_stmt_decl_namm_list)})
# 2.4.27
class_metrics.update({'MinStmtExeNAMM': min(number_of_stmt_exe_namm_list)})
# 2.4.28
class_metrics.update({'SDStmtNAMM': np.std(number_of_stmt_namm_list)})
# 2.4.29
class_metrics.update({'SDStmtDeclNAMM': np.std(number_of_stmt_decl_namm_list)})
# 2.4.30
class_metrics.update({'SDStmtExeNAMM': np.std(number_of_stmt_exe_namm_list)})
# Class number of not accessor or mutator methods
# Class max_nesting (4)
CSNOMNAMM = 0
max_nesting_list = list()
for method in method_list:
max_nesting_list.append(method.metric(['MaxNesting'])['MaxNesting'])
if not j_code_odor_metric.is_accesor_or_mutator(method_entity=method):
CSNOMNAMM += 1
cls.remove_none_from_lists([max_nesting_list])
class_metrics.update({'CSNOMNAMM': CSNOMNAMM})
class_metrics.update({'MinNesting': min(max_nesting_list)})
class_metrics.update({'AvgNesting': sum(max_nesting_list) / len(max_nesting_list)})
class_metrics.update({'SDNesting': np.std(max_nesting_list)})
# Custom (JCodeOdor) coupling metrics
class_metrics.update({'RFC': j_code_odor_metric.RFC(class_name=entity)})
class_metrics.update({'FANIN': j_code_odor_metric.FANIN(db=db, class_entity=entity)})
class_metrics.update({'FANOUT': j_code_odor_metric.FANOUT(db=db, class_entity=entity)})
        class_metrics.update({'ATFD': UnderstandUtility.ATFD(db=db, class_entity=entity)})  # Not implemented
class_metrics.update({'CFNAMM': j_code_odor_metric.CFNAMM_Class(class_name=entity)})
class_metrics.update({'DAC': UnderstandUtility.get_data_abstraction_coupling(db=db, class_entity=entity)})
class_metrics.update({'NumberOfMethodCalls': UnderstandUtility.number_of_method_call(class_entity=entity)})
# Visibility metrics
# Understand built-in metrics plus one custom metric.
class_metrics.update({'CSNOAMM': j_code_odor_metric.NOMAMM(class_entity=entity)})
# Inheritance metrics
class_metrics.update({'NIM': j_code_odor_metric.NIM(class_name=entity)})
class_metrics.update({'NMO': j_code_odor_metric.NMO(class_name=entity)})
class_metrics.update({'NOII': UnderstandUtility.NOII(db=db)}) # Not implemented
# ---------------------------------------
# New added metric (version 0.3.0, dataset 0.5.0)
class_count_path_list = list()
class_count_path_log_list = list()
class_knots_list = list()
for method in method_list:
class_count_path_list.append(method.metric(['CountPath'])['CountPath'])
class_count_path_log_list.append(method.metric(['CountPathLog'])['CountPathLog'])
class_knots_list.append(method.metric(['Knots'])['Knots'])
cls.remove_none_from_lists([class_count_path_list, class_count_path_log_list, class_knots_list])
class_metrics.update({'SumCountPath': sum(class_count_path_list)})
class_metrics.update({'MinCountPath': min(class_count_path_list)})
class_metrics.update({'MaxCountPath': max(class_count_path_list)})
class_metrics.update({'AvgCountPath': sum(class_count_path_list) / len(class_count_path_list)})
class_metrics.update({'SDCountPath': np.std(class_count_path_list)})
class_metrics.update({'SumCountPathLog': sum(class_count_path_log_list)})
class_metrics.update({'MinCountPathLog': min(class_count_path_log_list)})
class_metrics.update({'MaxCountPathLog': max(class_count_path_log_list)})
class_metrics.update({'AvgCountPathLog': sum(class_count_path_log_list) / len(class_count_path_log_list)})
class_metrics.update({'SDCountPathLog': np.std(class_count_path_log_list)})
class_metrics.update({'SumKnots': sum(class_knots_list)})
class_metrics.update({'MinKnots': min(class_knots_list)})
class_metrics.update({'MaxKnots': max(class_knots_list)})
class_metrics.update({'AvgKnots': sum(class_knots_list) / len(class_knots_list)})
class_metrics.update({'SDKnots': np.std(class_knots_list)})
constructor = UnderstandUtility.get_constructor_of_class_java(db=db, class_name=entity.longname())
class_metrics.update({'NumberOfClassConstructors': len(constructor)})
class_metrics.update({'NumberOfDepends': len(entity.depends())})
class_metrics.update({'NumberOfDependsBy': len(entity.dependsby())})
class_metrics.update({'NumberOfClassInItsFile': len(
UnderstandUtility.get_number_of_class_in_file_java(db=db, class_entity=entity))})
return class_metrics
@classmethod
def compute_java_class_metrics_lexicon(cls, db=None, entity=None):
"""
:param db:
:param entity:
:return:
"""
class_lexicon_metrics_dict = dict()
# for ib in entity.ib():
# print('entity ib', ib)
# Compute lexicons
tokens_list = list()
identifiers_list = list()
keywords_list = list()
operators_list = list()
return_and_print_count = 0
return_and_print_kw_list = ['return', 'print', 'printf', 'println', 'write', 'writeln']
condition_count = 0
condition_kw_list = ['if', 'for', 'while', 'switch', '?', 'assert', ]
uncondition_count = 0
uncondition_kw_list = ['break', 'continue', ]
exception_count = 0
exception_kw_list = ['try', 'catch', 'throw', 'throws', 'finally', ]
new_count = 0
new_count_kw_list = ['new']
super_count = 0
super_count_kw_list = ['super']
dots_count = 0
try:
# print('ec', entity.parent().id())
# source_file_entity = db.ent_from_id(entity.parent().id())
# print('file', type(source_file_entity), source_file_entity.longname())
for lexeme in entity.lexer(show_inactive=False):
# print(lexeme.text(), ': ', lexeme.token())
tokens_list.append(lexeme.text())
if lexeme.token() == 'Identifier':
identifiers_list.append(lexeme.text())
if lexeme.token() == 'Keyword':
keywords_list.append(lexeme.text())
if lexeme.token() == 'Operator':
operators_list.append(lexeme.text())
if lexeme.text() in return_and_print_kw_list:
return_and_print_count += 1
if lexeme.text() in condition_kw_list:
condition_count += 1
if lexeme.text() in uncondition_kw_list:
uncondition_count += 1
if lexeme.text() in exception_kw_list:
exception_count += 1
if lexeme.text() in new_count_kw_list:
new_count += 1
if lexeme.text() in super_count_kw_list:
super_count += 1
if lexeme.text() == '.':
dots_count += 1
        except Exception as error:
            raise RuntimeError('Error in computing class lexical metrics for class "{0}"'.format(entity.longname())) from error
number_of_assignments = operators_list.count('=')
number_of_operators_without_assignments = len(operators_list) - number_of_assignments
number_of_unique_operators = len(set(list(filter('='.__ne__, operators_list))))
class_lexicon_metrics_dict.update({'NumberOfTokens': len(tokens_list)})
class_lexicon_metrics_dict.update({'NumberOfUniqueTokens': len(set(tokens_list))})
class_lexicon_metrics_dict.update({'NumberOfIdentifies': len(identifiers_list)})
class_lexicon_metrics_dict.update({'NumberOfUniqueIdentifiers': len(set(identifiers_list))})
class_lexicon_metrics_dict.update({'NumberOfKeywords': len(keywords_list)})
class_lexicon_metrics_dict.update({'NumberOfUniqueKeywords': len(set(keywords_list))})
class_lexicon_metrics_dict.update(
{'NumberOfOperatorsWithoutAssignments': number_of_operators_without_assignments})
class_lexicon_metrics_dict.update({'NumberOfAssignments': number_of_assignments})
class_lexicon_metrics_dict.update({'NumberOfUniqueOperators': number_of_unique_operators})
class_lexicon_metrics_dict.update({'NumberOfDots': dots_count})
class_lexicon_metrics_dict.update({'NumberOfSemicolons': entity.metric(['CountSemicolon'])['CountSemicolon']})
class_lexicon_metrics_dict.update({'NumberOfReturnAndPrintStatements': return_and_print_count})
class_lexicon_metrics_dict.update({'NumberOfConditionalJumpStatements': condition_count})
class_lexicon_metrics_dict.update({'NumberOfUnConditionalJumpStatements': uncondition_count})
class_lexicon_metrics_dict.update({'NumberOfExceptionStatements': exception_count})
class_lexicon_metrics_dict.update({'NumberOfNewStatements': new_count})
class_lexicon_metrics_dict.update({'NumberOfSuperStatements': super_count})
# print('Class lexicon metrics:', class_lexicon_metrics_dict)
return class_lexicon_metrics_dict
@classmethod
def compute_java_package_metrics(cls, db=None, class_name: str = None):
# print('ib', entity.ib())
# package_name = ''
# Find package: strategy 1
# for ib in entity.ib():
# if ib.find('Package:') != -1:
# sp = ib.split(':')
# print('entity ib', sp[1][1:-1])
# package_name = sp[1][1:-1]
# Find package: strategy 2: Dominated strategy
class_name_list = class_name.split('.')[:-1]
package_name = '.'.join(class_name_list)
# print('package_name string', package_name)
package_list = db.lookup(package_name + '$', 'Package')
if package_list is None:
return None
if len(package_list) == 0: # if len != 1 return None!
return None
package = package_list[0]
# print('kind:', package.kind())
print('Computing package metrics for class: "{0}" in package: "{1}"'.format(class_name, package.longname()))
# Print info
# print('package metrics')
package_metrics = package.metric(package.metrics())
# print('number of metrics:', len(metrics), metrics)
# for i, metric in enumerate(metrics.keys()):
# print(i + 1, ': ', metric, metrics[metric])
# print('class metrics')
# metrics2 = entity.metric(entity.metrics())
# print('number of metrics:', len(metrics), metrics2)
# for i, metric2 in enumerate(metrics.keys()):
# print(i + 1, ': ', metric2, metrics[metric2])
#
# print(package.refs('Definein'))
# for defin in package.refs('Definein'):
# print('kind', defin.ent().kind())
# print(defin, '-->', defin.ent().ents('Java Define', 'Class'))
# metrics = entity.metric(defin.ent().metrics())
# print('number of metrics in file:', len(metrics), metrics)
# for i, metric in enumerate(metrics.keys()):
# print(i + 1, ': ', metric, metrics[metric])
classes_and_interfaces_list = UnderstandUtility.get_package_clasess_java(package_entity=package)
# print(classes_and_interfaces_list)
# quit()
# 2. Custom package metrics
# 2.1. PKLOC (15)
pk_loc_list = list()
pk_loc_decl_list = list()
pk_loc_exe_list = list()
for type_entity in classes_and_interfaces_list:
pk_loc_list.append(type_entity.metric(['CountLineCode'])['CountLineCode'])
pk_loc_decl_list.append(type_entity.metric(['CountLineCodeDecl'])['CountLineCodeDecl'])
pk_loc_exe_list.append(type_entity.metric(['CountLineCodeExe'])['CountLineCodeExe'])
cls.remove_none_from_lists([pk_loc_list, pk_loc_decl_list, pk_loc_exe_list])
try:
package_metrics.update({'AvgLineCodeDecl': sum(pk_loc_decl_list) / len(pk_loc_decl_list)})
package_metrics.update({'AvgLineCodeExe': sum(pk_loc_exe_list) / len(pk_loc_exe_list)})
package_metrics.update({'MaxLineCode': max(pk_loc_list)})
package_metrics.update({'MaxLineCodeDecl': max(pk_loc_decl_list)})
package_metrics.update({'MaxLineCodeExe': max(pk_loc_exe_list)})
package_metrics.update({'MinLineCode': min(pk_loc_list)})
package_metrics.update({'MinLineCodeDecl': min(pk_loc_decl_list)})
package_metrics.update({'MinLineCodeExe': min(pk_loc_exe_list)})
package_metrics.update({'SDLineCode': np.std(pk_loc_list)})
package_metrics.update({'SDLineCodeDecl': np.std(pk_loc_decl_list)})
package_metrics.update({'SDLineCodeExe': np.std(pk_loc_exe_list)})
        except Exception as error:
            raise TypeError('Error occurred when computing package metrics for class "{0}" and list "{1}"'.format(
                class_name, pk_loc_decl_list)) from error
# 2.2 PKNOS (15)
pk_stmt_list = list()
pk_stmt_decl_list = list()
pk_stmt_exe_list = list()
for type_entity in classes_and_interfaces_list:
pk_stmt_list.append(type_entity.metric(['CountStmt'])['CountStmt'])
pk_stmt_decl_list.append(type_entity.metric(['CountStmtDecl'])['CountStmtDecl'])
pk_stmt_exe_list.append(type_entity.metric(['CountStmtExe'])['CountStmtExe'])
cls.remove_none_from_lists([pk_stmt_list, pk_stmt_decl_list, pk_stmt_exe_list])
        package_metrics.update({'AvgStmt': sum(pk_stmt_list) / len(pk_stmt_list)})
package_metrics.update({'AvgStmtDecl': sum(pk_stmt_decl_list) / len(pk_stmt_decl_list)})
package_metrics.update({'AvgStmtExe': sum(pk_stmt_exe_list) / len(pk_stmt_exe_list)})
package_metrics.update({'MaxStmt': max(pk_stmt_list)})
package_metrics.update({'MaxStmtDecl': max(pk_stmt_decl_list)})
package_metrics.update({'MaxStmtExe': max(pk_stmt_exe_list)})
package_metrics.update({'MinStmt': min(pk_stmt_list)})
package_metrics.update({'MinStmtDecl': min(pk_stmt_decl_list)})
package_metrics.update({'MinStmtExe': min(pk_stmt_exe_list)})
package_metrics.update({'SDStmt': np.std(pk_stmt_list)})
package_metrics.update({'SDStmtDecl': np.std(pk_stmt_decl_list)})
package_metrics.update({'SDStmtExe': np.std(pk_stmt_exe_list)})
# 2.3 PKCC (20)
pk_cyclomatic_list = list()
pk_cyclomatic_namm_list = list()
pk_cyclomatic_strict_list = list()
pk_cyclomatic_strict_namm_list = list()
pk_cyclomatic_modified_list = list()
pk_cyclomatic_modified_namm_list = list()
pk_essential_list = list()
pk_essential_namm_list = list()
for type_entity in classes_and_interfaces_list:
pk_cyclomatic_list.append(type_entity.metric(['SumCyclomatic'])['SumCyclomatic'])
pk_cyclomatic_modified_list.append(type_entity.metric(['SumCyclomaticModified'])['SumCyclomaticModified'])
pk_cyclomatic_strict_list.append(type_entity.metric(['SumCyclomaticStrict'])['SumCyclomaticStrict'])
pk_essential_list.append(type_entity.metric(['SumEssential'])['SumEssential'])
cls.remove_none_from_lists(
[pk_cyclomatic_list, pk_cyclomatic_strict_list, pk_cyclomatic_modified_list, pk_essential_list])
package_metrics.update({'MinCyclomatic': min(pk_cyclomatic_list)})
package_metrics.update({'MinCyclomaticModified': min(pk_cyclomatic_modified_list)})
package_metrics.update({'MinCyclomaticStrict': min(pk_cyclomatic_strict_list)})
package_metrics.update({'MinEssential': min(pk_essential_list)})
package_metrics.update({'SDCyclomatic': np.std(pk_cyclomatic_list)})
package_metrics.update({'SDCyclomaticModified': np.std(pk_cyclomatic_modified_list)})
package_metrics.update({'SDCyclomaticStrict': np.std(pk_cyclomatic_strict_list)})
package_metrics.update({'SDEssential': np.std(pk_essential_list)})
# 2.4 PKNESTING (4)
pk_nesting_list = list()
for type_entity in classes_and_interfaces_list:
pk_nesting_list.append(type_entity.metric(['MaxNesting'])['MaxNesting'])
cls.remove_none_from_lists([pk_nesting_list])
package_metrics.update({'MinNesting': min(pk_nesting_list)})
package_metrics.update({'AvgNesting': sum(pk_nesting_list) / len(pk_nesting_list)})
package_metrics.update({'SDNesting': np.std(pk_nesting_list)})
# 2.5
# Other Size/Count metrics (understand built-in metrics)
# PKNOMNAMM: Package number of not accessor or mutator methods
j_code_odor = JCodeOdorMetric()
pk_not_accessor_and_mutator_methods_list = list()
pk_accessor_and_mutator_methods_list = list()
for type_entity in classes_and_interfaces_list:
pk_not_accessor_and_mutator_methods_list.append(j_code_odor.NOMNAMM(type_entity))
pk_accessor_and_mutator_methods_list.append(j_code_odor.NOMAMM(type_entity))
cls.remove_none_from_lists([pk_not_accessor_and_mutator_methods_list, pk_accessor_and_mutator_methods_list])
package_metrics.update({'PKNOMNAMM': sum(pk_not_accessor_and_mutator_methods_list)})
# 2.6 Visibility metrics
        # Other visibility metrics (Understand built-in metrics)
        package_metrics.update({'PKNOAMM': sum(pk_accessor_and_mutator_methods_list)})
        # Other visibility metrics to be added in future work
# 2.7 Inheritance metrics
package_metrics.update({'PKNOI': len(UnderstandUtility.get_package_interfaces_java(package_entity=package))})
package_metrics.update(
{'PKNOAC': len(UnderstandUtility.get_package_abstract_class_java(package_entity=package))})
# print(len(package_metrics))
# print(package_metrics)
return package_metrics
@classmethod
def compute_java_project_metrics(cls, db):
project_metrics = db.metric(db.metrics())
# print('number of metrics:', len(project_metrics), project_metrics)
# for i, metric in enumerate( project_metrics.keys()):
# print(i + 1, ': ', metric, project_metrics[metric])
# print(project_metrics) # Print Understand built-in metrics
# 2 Custom project metrics
files = UnderstandUtility.get_project_files_java(db=db)
# 2.1 PJLOC (30)
pj_loc_list = list()
pj_loc_decl_list = list()
pj_loc_exe_list = list()
pj_stmt_list = list()
pj_stmt_decl_list = list()
pj_stmt_exe_list = list()
for file_entity in files:
pj_loc_list.append(file_entity.metric(['CountLineCode'])['CountLineCode'])
pj_loc_decl_list.append(file_entity.metric(['CountLineCodeDecl'])['CountLineCodeDecl'])
pj_loc_exe_list.append(file_entity.metric(['CountLineCodeExe'])['CountLineCodeExe'])
pj_stmt_list.append(file_entity.metric(['CountStmt'])['CountStmt'])
pj_stmt_decl_list.append(file_entity.metric(['CountStmtDecl'])['CountStmtDecl'])
pj_stmt_exe_list.append(file_entity.metric(['CountStmtExe'])['CountStmtExe'])
cls.remove_none_from_lists([pj_loc_list, pj_loc_decl_list, pj_loc_exe_list,
pj_stmt_list, pj_stmt_decl_list, pj_stmt_exe_list])
project_metrics.update({'AvgLineCodeDecl': sum(pj_loc_decl_list) / len(pj_loc_decl_list)})
project_metrics.update({'AvgLineCodeExe': sum(pj_loc_exe_list) / len(pj_loc_exe_list)})
project_metrics.update({'MaxLineCode': max(pj_loc_list)})
project_metrics.update({'MaxLineCodeDecl': max(pj_loc_decl_list)})
project_metrics.update({'MaxLineCodeExe': max(pj_loc_exe_list)})
project_metrics.update({'MinLineCode': min(pj_loc_list)})
project_metrics.update({'MinLineCodeDecl': min(pj_loc_decl_list)})
project_metrics.update({'MinLineCodeExe': min(pj_loc_exe_list)})
project_metrics.update({'SDLineCode': np.std(pj_loc_list)})
project_metrics.update({'SDLineCodeDecl': np.std(pj_loc_decl_list)})
project_metrics.update({'SDLineCodeExe': np.std(pj_loc_exe_list)})
# 2.2. PJNOST (15)
project_metrics.update({'AvgStmt': sum(pj_stmt_list) / len(pj_stmt_list)})
project_metrics.update({'AvgStmtDecl': sum(pj_stmt_decl_list) / len(pj_stmt_decl_list)})
project_metrics.update({'AvgStmtExe': sum(pj_stmt_exe_list) / len(pj_stmt_exe_list)})
project_metrics.update({'MaxStmt': max(pj_stmt_list)})
project_metrics.update({'MaxStmtDecl': max(pj_stmt_decl_list)})
project_metrics.update({'MaxStmtExe': max(pj_stmt_exe_list)})
project_metrics.update({'MinStmt': min(pj_stmt_list)})
project_metrics.update({'MinStmtDecl': min(pj_stmt_decl_list)})
project_metrics.update({'MinStmtExe': min(pj_stmt_exe_list)})
project_metrics.update({'SDStmt': np.std(pj_stmt_list)})
project_metrics.update({'SDStmtDecl': np.std(pj_stmt_decl_list)})
project_metrics.update({'SDStmtExe': np.std(pj_stmt_exe_list)})
# 2.3 Other Count/Size metrics
packages = db.ents('Java Package')
# print('number of packages', len(packages))
project_metrics.update({'NumberOfPackages': len(packages)})
j_code_odor = JCodeOdorMetric()
pj_number_of_method_namm = 0
for class_ in UnderstandUtility.get_project_classes_java(db=db):
pj_number_of_method_namm += j_code_odor.NOMNAMM(class_)
project_metrics.update({'PJNOMNAMM': pj_number_of_method_namm})
# 2.4 PJCC (20): Project cyclomatic complexity
pj_cyclomatic_list = list()
pj_cyclomatic_namm_list = list()
pj_cyclomatic_strict_list = list()
pj_cyclomatic_strict_namm_list = list()
pj_cyclomatic_modified_list = list()
pj_cyclomatic_modified_namm_list = list()
pj_essential_list = list()
pj_essential_namm_list = list()
for type_entity in files:
pj_cyclomatic_list.append(type_entity.metric(['SumCyclomatic'])['SumCyclomatic'])
pj_cyclomatic_modified_list.append(type_entity.metric(['SumCyclomaticModified'])['SumCyclomaticModified'])
pj_cyclomatic_strict_list.append(type_entity.metric(['SumCyclomaticStrict'])['SumCyclomaticStrict'])
pj_essential_list.append(type_entity.metric(['SumEssential'])['SumEssential'])
cls.remove_none_from_lists([pj_cyclomatic_list, pj_cyclomatic_strict_list,
pj_cyclomatic_modified_list, pj_essential_list])
project_metrics.update({'SumCyclomatic': sum(pj_cyclomatic_list)})
project_metrics.update({'SumCyclomaticModified': sum(pj_cyclomatic_modified_list)})
project_metrics.update({'SumCyclomaticStrict': sum(pj_cyclomatic_strict_list)})
project_metrics.update({'SumEssential': sum(pj_essential_list)})
project_metrics.update({'MaxCyclomatic': max(pj_cyclomatic_list)})
project_metrics.update({'MaxCyclomaticModified': max(pj_cyclomatic_modified_list)})
project_metrics.update({'MaxCyclomaticStrict': max(pj_cyclomatic_strict_list)})
project_metrics.update({'MaxEssential': max(pj_essential_list)})
project_metrics.update({'AvgCyclomatic': sum(pj_cyclomatic_list) / len(pj_cyclomatic_list)})
project_metrics.update(
{'AvgCyclomaticModified': sum(pj_cyclomatic_modified_list) / len(pj_cyclomatic_modified_list)})
project_metrics.update({'AvgCyclomaticStrict': sum(pj_cyclomatic_strict_list) / len(pj_cyclomatic_strict_list)})
project_metrics.update({'AvgEssential': sum(pj_essential_list) / len(pj_essential_list)})
project_metrics.update({'MinCyclomatic': min(pj_cyclomatic_list)})
project_metrics.update({'MinCyclomaticModified': min(pj_cyclomatic_modified_list)})
project_metrics.update({'MinCyclomaticStrict': min(pj_cyclomatic_strict_list)})
project_metrics.update({'MinEssential': min(pj_essential_list)})
project_metrics.update({'SDCyclomatic': np.std(pj_cyclomatic_list)})
project_metrics.update({'SDCyclomaticModified': np.std(pj_cyclomatic_modified_list)})
project_metrics.update({'SDCyclomaticStrict': np.std(pj_cyclomatic_strict_list)})
project_metrics.update({'SDEssential': np.std(pj_essential_list)})
        # 2.5 PJNESTING (4)
pj_nesting_list = list()
for type_entity in files:
pj_nesting_list.append(type_entity.metric(['MaxNesting'])['MaxNesting'])
cls.remove_none_from_lists([pj_nesting_list])
project_metrics.update({'MinNesting': min(pj_nesting_list)})
project_metrics.update({'AvgNesting': sum(pj_nesting_list) / len(pj_nesting_list)})
project_metrics.update({'SDNesting': np.std(pj_nesting_list)})
# 3 Inheritance metrics
project_metrics.update({'PJNOI': len(UnderstandUtility.get_project_interfaces_java(db=db))})
project_metrics.update({'PJNAC': len(UnderstandUtility.get_project_abstract_classes_java(db=db))})
return project_metrics
@classmethod
def get_entity_kind(cls, db, class_name):
entity = db.lookup(class_name + '$', 'Type')
return entity[0].kindname()
@classmethod
    def remove_none_from_lists(cls, lists: list = None):
        for list_ in lists:
            # Drop None entries in place so that the sum/min/max/np.std calls on these
            # lists do not fail on metrics that Understand could not compute.
            list_[:] = [item for item in list_ if item is not None]
            if len(list_) == 0:
                list_.append(0)
                warnings.warn('Empty list passed!')
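    # Usage sketch (not from the source; assumes the enclosing class is TestabilityMetrics):
    #   loc_list = [10, None, 3]
    #   TestabilityMetrics.remove_none_from_lists([loc_list])    # loc_list becomes [10, 3]
    #   empty_list = []
    #   TestabilityMetrics.remove_none_from_lists([empty_list])  # empty_list becomes [0] and a warning is issued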
# ------------------------------------------------------------------------
class PreProcess:
"""
"""
# -------------------------------------------
# Dataset creation API
@classmethod
def create_understand_database_from_project(cls, root_path=None):
# First path
# root_path = 'E:/LSSDS/EvoSuite/SF110-20130704-src/SF110-20130704-src/'
# Second path, after eliminating all test class form SF110
root_path = 'sf110_without_test/' # A place for both project sources and understand databases
# 'create -db C:\Users\NOLIMIT\Desktop\sbta -languages c# add C:\Users\NOLIMIT\Desktop\sbta analyze -all'
# {0}: understand_db_directory, {1}: understand_db_name, {2}: project_root_directory
cmd = 'und create -db {0}{1}.udb -languages java add {2} analyze -all'
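        # Illustrative expansion (hypothetical project name 'myproject'):
        #   cmd.format('sf110_without_test/', 'myproject', 'sf110_without_test/myproject')
        #   -> 'und create -db sf110_without_test/myproject.udb -languages java add sf110_without_test/myproject analyze -all'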
# projects = [x[0] for x in os.walk(root_path)]
projects = [name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name))]
for project_ in projects:
command_ = cmd.format(root_path, project_, root_path + project_)
print('executing command {0}'.format(command_))
# returned_value_in_byte = subprocess.check_output(command_, shell=True)
os.system('cmd /c "{0}"'.format(command_))
# os.system('cmd / k "{0}"'.format(command_))
@classmethod
def extract_project_classes_all(cls, udbs_path, class_list_csv_path_root=r'class_list_csvs/'):
files = [f for f in os.listdir(udbs_path) if os.path.isfile(os.path.join(udbs_path, f))]
for f in files:
print('processing understand db file {0}:'.format(f))
db = understand.open(os.path.join(udbs_path, f))
cls.write_project_classes(project_name=f[:-4], db=db, csv_path=class_list_csv_path_root + f[:-4] + '.csv')
print('processing understand db file {0} was finished'.format(f))
db.close()
@classmethod
def extract_project_classes(cls, db):
classes_list = UnderstandUtility.get_project_classes_longnames_java(db=db)
print('-' * 75)
print('@understand', len(set(classes_list)), set(classes_list))
return classes_list
@classmethod
def write_project_classes(cls, project_name: str = None, db=None, csv_path: str = None):
classes = cls.extract_project_classes(db=db)
df = pd.DataFrame(columns=['Project', 'Class', 'Line', 'Branch', 'Mutation', 'Output', 'Exceptions', 'Tests'])
df['Project'] = [project_name for i in range(0, len(classes))]
df['Class'] = classes
df.to_csv(csv_path, index=False)
@classmethod
def read_project_classes(cls, project_name: str = None, db=None, df: pd.DataFrame = None):
df1 = df.loc[df.Project == project_name]
class_entities = list()
for index, row in df1.iterrows():
# Find relevant class entity
class_entity_ = UnderstandUtility.get_class_entity_by_name(db=db, class_name=row['Class'])
if class_entity_ is not None:
method_list = UnderstandUtility.get_method_of_class_java2(db=db, class_entity=class_entity_)
if method_list is not None:
class_entities.append(class_entity_)
else:
# We do not need a class without any method!
warnings.warn('Requested class with name "{0}" does not have any method!'.format(row['Class']))
else:
                # If the class is not found it may be an enum or an interface, so we simply ignore it for metric computation
                warnings.warn('Requested class with name "{0}" was not found in the project!'.format(row['Class']))
return class_entities
@classmethod
def extract_metrics_and_coverage_all(cls, udbs_path: str = r'sf110_without_test',
class_list_csv_path: str = r'runtime_result/evosuit160_sf110_result_html_with_project.csv',
csvs_path: str = r'sf110_csvs_without_test_e3/',
):
df = pd.read_csv(class_list_csv_path, delimiter=',', index_col=False)
files = [f for f in os.listdir(udbs_path) if os.path.isfile(os.path.join(udbs_path, f))]
t = list()
p = list()
for i, f in enumerate(files):
print('processing understand db file {0}:'.format(f))
db = understand.open(os.path.join(udbs_path, f))
# cls.check_compute_metrics_by_class_list(project_name=f[:-4], database=db, class_list=df, csv_path=csvs_path)
# t.append(threading.Thread(target=cls.check_compute_metrics_by_class_list, args=(f[:-4], db, df, csvs_path, )))
# t[i].start()
# p.append(multiprocessing.Process(target=cls.check_compute_metrics_by_class_list, args=(f[:-4], db, df, csvs_path, )))
# p[i].start()
cls.compute_metrics_by_class_list(project_name=f[:-4], database=db, class_list=df, csv_path=csvs_path)
print('processing understand db file {0} was finished'.format(f))
db.close()
@classmethod
def check_compute_metrics_by_class_list(cls, project_name: str = None, database=None, class_list=None,
csv_path=None):
class_entities = cls.read_project_classes(project_name=project_name, db=database, df=class_list, )
print('Number of classes in {0}: {1}'.format(project_name, len(class_entities)))
columns = ['Project', 'NumberOfClass']
columns.extend(TestabilityMetrics.get_all_metrics_names())
dummy_data = [0 for i in range(0, len(columns) - 2)]
dummy_data.insert(0, project_name)
dummy_data.insert(1, len(class_entities))
        df = pd.DataFrame(data=[dummy_data], columns=columns)
import sys
import re
import requests
from bs4 import BeautifulSoup as soup
import pandas as pd
def ItemResults(item):
'''
    Function for scraping the list of items available for the desired one.
    The function scrapes:
- Item name
- Item link
- Item price per piece
- Minimum order quantity
'''
    # Connect to the web site and scrape the top items' details
    try:
url = requests.get(item['link'])
    except Exception:
print('Error in reading page!')
quit()
    # Get all page contents
page = soup(url.content, 'html.parser')
# Find result items div
containers = page.findAll('div',{'class':'item-info'})
    # Initiate the items dataframe
    items = pd.DataFrame(columns=['name', 'item', 'link', 'price', 'min-order'])
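    # The original snippet ends here. A minimal sketch (not from the source; the child
    # tag/class names below are assumptions about the page markup) of how the result
    # rows could be collected from `containers` into the `items` frame:
    rows = []
    for container in containers:
        title_tag = container.find('a')  # assumed: the first anchor holds the item name and link
        price_tag = container.find('div', {'class': 'price'})  # assumed class name
        order_tag = container.find('div', {'class': 'min-order'})  # assumed class name
        rows.append({
            'name': title_tag.get_text(strip=True) if title_tag else None,
            'item': item.get('name'),  # assumption: the searched item dict carries a 'name' key
            'link': title_tag.get('href') if title_tag else None,
            'price': price_tag.get_text(strip=True) if price_tag else None,
            'min-order': order_tag.get_text(strip=True) if order_tag else None,
        })
    items = pd.concat([items, pd.DataFrame(rows, columns=items.columns)], ignore_index=True)
    # return items  # presumably how the original function hands the result back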
# %%
import math
import multiprocessing as mp
import numpy as np
import pandas as pd
import pickle
import string
from sklearn.dummy import DummyClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import classification_report, confusion_matrix, f1_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
# %%
# config
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_rows', 50)
#%%
alexa = pd.read_csv('train/top-1m-alexa-210307.csv', usecols=[1], names=['hostname'])
print(alexa.head())
# %%
# cisco = pd.read_csv('train/top-1m-cisco-210307.csv', usecols=[1], names=['hostname'])
# print(cisco.head())
# find count of hostnames matching across list
#print (alexa[alexa['hostname'].isin(cisco['hostname'])].size)
# %%
malware_pd = pd.read_csv(r'train\hosts-210311.txt', delim_whitespace=True, usecols=[1], names=['hostname'], skiprows=39, skipfooter=11)
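# %%
# The rest of the original script is not shown. A minimal sketch (an assumption, not
# the source's pipeline) of how the two hostname lists could be labelled and combined
# before feature extraction and training with the classifiers imported above:
alexa['label'] = 0        # benign hostnames
malware_pd['label'] = 1   # malicious hostnames
hosts = pd.concat([alexa, malware_pd], ignore_index=True).drop_duplicates(subset='hostname')
train_df, test_df = train_test_split(hosts, test_size=0.2, stratify=hosts['label'], random_state=42)
print(train_df['label'].value_counts())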
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
np.random.seed(42)
# concatenated dataframe file for all simulations
# set the file name and path
# reorganize dataframe and split data
df = pd.read_csv('df_cdt/cell250/cell250_case1_all_data.csv')
X = df.drop(['Class', 'ID'], axis=1)
y = df.Class
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# set the hyperparameter for searching for each algorithm
clf_rf = RandomForestClassifier(n_jobs=-1)
param_grid_rf = {'n_estimators': np.arange(40,110,10),
'max_features': [0.1, 0.2, 0.3, 0.4, 'sqrt', 'log2']}
gs_rf = GridSearchCV(clf_rf, param_grid_rf, verbose=2, cv=5, n_jobs=-1)
gs_rf.fit(X_train, y_train)
result_rf = pd.DataFrame(gs_rf.cv_results_)
result_rf.to_csv('df_cdt/cell250/cell250_gridsearch_rf.csv', index=False)
clf_knn = KNeighborsClassifier(n_jobs=-1)
param_grid_knn = {'n_neighbors': np.arange(3,17,2)}
gs_knn = GridSearchCV(clf_knn, param_grid_knn, verbose=3, cv=5, n_jobs=-1)
gs_knn.fit(X_train, y_train)
result_knn = pd.DataFrame(gs_knn.cv_results_)
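# The original snippet ends here. A plausible continuation (an assumption, not part of
# the source) would mirror the RandomForest pattern for the KNN results and use the
# imported but so-far-unused StandardScaler/SVC for a scaled SVM grid search:
result_knn.to_csv('df_cdt/cell250/cell250_gridsearch_knn.csv', index=False)  # hypothetical output path

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)

clf_svc = SVC()
param_grid_svc = {'C': [0.1, 1, 10, 100], 'gamma': ['scale', 0.01, 0.1, 1]}
gs_svc = GridSearchCV(clf_svc, param_grid_svc, verbose=2, cv=5, n_jobs=-1)
gs_svc.fit(X_train_scaled, y_train)
result_svc = pd.DataFrame(gs_svc.cv_results_)
result_svc.to_csv('df_cdt/cell250/cell250_gridsearch_svc.csv', index=False)  # hypothetical output path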
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data),
header=[0, 1, 2, 3], index_col=[0, 1],
as_recarray=True, tupleize_cols=False)
#!/usr/bin/env python3
# coding: utf-8
"""Abstract command classes for hicstuff
This module contains all classes related to hicstuff
commands:
-iteralign (iterative mapping)
-digest (genome chunking)
-cutsite (preprocess fastq by cutting reads into digestion products)
-filter (Hi-C 'event' sorting: loops, uncuts, weird
and 'true contacts')
-view (map visualization)
-pipeline (whole contact map generation)
-distancelaw (Analysis tool and plot for the distance law)
Running 'pipeline' implies running 'digest', but not
iteralign or filter unless specified, because they can
take up a lot of time for diminishing returns.
Note
----
Structure based on <NAME> (rgreinho) tutorial on subcommands in
docopt : https://github.com/rgreinho/docopt-subcommands-example
cmdoret, 20181412
Raises
------
NotImplementedError
Will be raised if AbstractCommand is called for
some reason instead of one of its children.
ValueError
Will be raised if an incorrect chunking method (e.g.
not an enzyme or number) or an invalid range view is
specified.
"""
import re
import sys, os, shutil
import tempfile
from os.path import join, dirname
from matplotlib import pyplot as plt
from matplotlib import cm
from docopt import docopt
import pandas as pd
import numpy as np
import pysam as ps
import glob
import copy
from Bio import SeqIO
import hicstuff.view as hcv
import hicstuff.hicstuff as hcs
import hicstuff.cutsite as hcc
import hicstuff.digest as hcd
import hicstuff.iteralign as hci
import hicstuff.filter as hcf
from hicstuff.version import __version__
import hicstuff.io as hio
from hicstuff.log import logger
import hicstuff.pipeline as hpi
import hicstuff.distance_law as hcdl
DIVERGENT_CMAPS = [
"PiYG",
"PRGn",
"BrBG",
"PuOr",
"RdGy",
"RdBu",
"RdYlBu",
"RdYlGn",
"Spectral",
"coolwarm",
"bwr",
"seismic",
]
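# These diverging colormaps are used for log-ratio views of two contact maps
# (see View.execute below), where values are centred on 0. Illustrative
# invocation (file names are placeholders): view -c RdBu map1.cool map2.cool
# keeps the requested cmap, while a sequential one falls back to "seismic".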
class AbstractCommand:
"""Abstract base command class
Base class for the commands from which
other hicstuff commands derive.
"""
def __init__(self, command_args, global_args):
"""Initialize the commands"""
self.args = docopt(self.__doc__, argv=command_args)
self.global_args = global_args
# Map Hi-C format to file extension
self.fmt2ext = {"cool": ".cool", "bg2": ".bg2", "graal": ".mat.tsv"}
def execute(self):
"""Execute the commands"""
raise NotImplementedError
def check_output_path(self, path, force=False):
"""Throws error if the output file exists. Create required file tree otherwise."""
# Get complete output filename and prevent overwriting unless force is enabled
if not force and os.path.exists(path):
raise IOError(
"Output file already exists. Use --force to overwrite"
)
if dirname(path):
os.makedirs(dirname(path), exist_ok=True)
class Iteralign(AbstractCommand):
"""Iterative mapping command
Truncate reads from a fastq file to 20 basepairs and iteratively extend and
re-align the unmapped reads to optimize the proportion of uniquely aligned
reads in a 3C library.
usage:
iteralign [--aligner=bowtie2] [--threads=1] [--min-len=20] [--read-len=INT]
[--tempdir=DIR] --out-bam=FILE --genome=FILE <reads.fq>
arguments:
reads.fq Fastq file containing the reads to be aligned
options:
-g, --genome=FILE The genome on which to map the reads. Must be
the path to the bowtie2/bwa index if using bowtie2/bwa
or to the genome in fasta format if using minimap2.
-t, --threads=INT Number of parallel threads allocated for the
alignment [default: 1].
-T, --tempdir=DIR Temporary directory. Defaults to current
directory.
-a, --aligner=bowtie2 Choose alignment software between bowtie2,
minimap2 or bwa. minimap2 should only be used for
reads > 100 bp. [default: bowtie2]
-l, --min-len=INT Length to which the reads should be
truncated [default: 20].
-o, --out-bam=FILE Path where the alignment will be written in
BAM format.
-R, --read-len=INT Read length in input FASTQ file. If not provided,
this is estimated from the first read in the file.
"""
def execute(self):
read_len = self.args["--read-len"]
if read_len is not None:
read_len = int(read_len)
if not self.args["--tempdir"]:
self.args["--tempdir"] = "."
temp_directory = hio.generate_temp_dir(self.args["--tempdir"])
hci.iterative_align(
self.args["<reads.fq>"],
temp_directory,
self.args["--genome"],
self.args["--threads"],
self.args["--out-bam"],
aligner=self.args["--aligner"],
min_len=int(self.args["--min-len"]),
read_len=read_len,
)
# Deletes the temporary folder
shutil.rmtree(temp_directory)
class Digest(AbstractCommand):
"""Genome chunking command
Digests a fasta file into fragments based on a restriction enzyme or a
fixed chunk size. Generates two output files into the target directory
named "info_contigs.txt" and "fragments_list.txt"
usage:
digest [--plot] [--figdir=FILE] [--force] [--circular] [--size=0]
[--outdir=DIR] --enzyme=ENZ <fasta>
arguments:
fasta Fasta file to be digested
options:
-c, --circular Specify if the genome is circular.
-e, --enzyme=ENZ[,ENZ2,...] A restriction enzyme or an integer
representing fixed chunk sizes (in bp).
Multiple comma-separated enzymes can
be given.
-F, --force Write even if the output file already exists.
-s, --size=INT Minimum size threshold to keep
fragments. [default: 0]
-o, --outdir=DIR Directory where the fragments and
contigs files will be written.
Defaults to current directory.
-p, --plot Show a histogram of fragment length
distribution after digestion.
-f, --figdir=FILE Path to directory of the output figure.
By default, the figure is only shown
but not saved.
output:
fragments_list.txt: information about restriction fragments (or chunks)
info_contigs.txt: information about contigs or chromosomes
"""
def execute(self):
# If circular is not specified, change it from None to False
if not self.args["--circular"]:
self.args["--circular"] = False
if not self.args["--outdir"]:
self.args["--outdir"] = os.getcwd()
# Create output directory if it does not exist
if os.path.exists(self.args["--outdir"]):
if not self.args["--force"]:
raise IOError(
"Output directory already exists. Use --force to overwrite"
)
else:
os.makedirs(self.args["--outdir"], exist_ok=True)
if self.args["--figdir"]:
figpath = join(self.args["--figdir"], "frags_hist.pdf")
else:
figpath = None
# Split into a list if multiple enzymes given
enzyme = self.args["--enzyme"]
if re.search(r",", enzyme):
enzyme = enzyme.split(",")
hcd.write_frag_info(
self.args["<fasta>"],
enzyme,
self.args["--size"],
output_dir=self.args["--outdir"],
circular=self.args["--circular"],
)
hcd.frag_len(
output_dir=self.args["--outdir"],
plot=self.args["--plot"],
fig_path=figpath,
)
class Cutsite(AbstractCommand):
"""Cutsite command
Generates new gzipped fastq files from original fastq. The function will cut
the reads at their religation sites and create new pairs of reads with the
different fragments obtained after cutting at the digestion sites.
There are three ways to combine the fragments. 1. "for_vs_rev": All
the combinations are made between one forward fragment and one reverse
fragment. 2. "all": All 2-combinations are made. 3. "pile": Only
combinations between adjacent fragments in the initial reads are made.
usage:
cutsite --forward=FILE --reverse=FILE --prefix=STR --enzyme=STR
[--threads=1] [--mode=for_vs_rev]
options:
-1, --forward=FILE Fastq file containing the forward reads to
digest.
-2, --reverse=FILE Fastq file containing the reverse reads to
digest.
-p, --prefix=STR Prefix of the path where to write the digested
gzipped fastq files. Filenames will be added the
suffix "_{1,2}.fq.gz".
-e, --enzyme=STR The list of restriction enzyme used to digest
the genome separated by a comma. Example:
HpaII,MluCI.
-m, --mode=STR Digestion mode. There are three possibilities:
"for_vs_rev", "all" and "pile". The first one
"for_vs_rev" makes all possible contact between
fragments from forward read versus the fragments
of the reverse reads. The second one "all"
consist two make all pairs of fragments
possible. The third one "pile" will make the
contacts only with the adjacent fragments.
[Default: for_vs_rev]
-t, --threads=INT Number of parallel threads allocated for the
alignment. [Default: 1]
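example (illustrative only; enzyme names and file paths are placeholders):
cutsite -1 lib_R1.fq.gz -2 lib_R2.fq.gz -p out/lib_digested -e DpnII,HinfI -m pile -t 4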
"""
def execute(self):
# Check for mandatory options
for option in ["--prefix", "--forward", "--reverse"]:
if self.args[option] is None:
raise ValueError(f"{option} is mandatory.")
prefix = self.args["--prefix"]
# Create output directory if it does not exist
if dirname(prefix):
os.makedirs(dirname(prefix), exist_ok=True)
output_for = prefix + "_1.fq.gz"
output_rev = prefix + "_2.fq.gz"
# Digestion of the reads.
logger.info("Digestion of the reads:")
logger.info("Enzyme used: {0}".format(self.args["--enzyme"]))
logger.info(
"Mode used to cut the reads: {0}".format(self.args["--mode"])
)
hcc.cut_ligation_sites(
self.args["--forward"],
self.args["--reverse"],
output_for,
output_rev,
enzyme=self.args["--enzyme"],
mode=self.args["--mode"],
n_cpu=int(self.args["--threads"]),
)
class Filter(AbstractCommand):
"""Mapping event filtering command
Filters spurious 3C events such as loops and uncuts from the library based
on a minimum distance threshold automatically estimated from the library by
default. Can also plot 3C library statistics.
usage:
filter [--interactive | --thresholds INT-INT] [--plot]
[--figdir FILE] [--prefix STR] <input> <output>
arguments:
input 2D BED file containing coordinates of Hi-C interacting
pairs, the index of their restriction fragment and their
strands.
output Path to the filtered file, in the same format as the input.
options:
-f, --figdir=DIR Path to the output figure directory.
By default, the figure is only shown
but not saved.
-i, --interactive Interactively shows plots and asks
for thresholds.
-p, --plot Shows plots of library composition
and 3C events abundance.
-P, --prefix STR If the library has a name, it will
be shown on the figures.
-t, --thresholds=INT-INT Manually defines integer values for
the thresholds in the order
[uncut, loop]. Reads above those values
are kept.
"""
def execute(self):
figpath = None
if self.args["--thresholds"]:
# Thresholds supplied by user beforehand
uncut_thr, loop_thr = self.args["--thresholds"].split("-")
try:
uncut_thr = int(uncut_thr)
loop_thr = int(loop_thr)
except ValueError:
logger.error(
"You must provide integer numbers for the thresholds."
)
else:
# Threshold defined at runtime
if self.args["--figdir"]:
figpath = join(self.args["--figdir"], "event_distance.pdf")
if not os.path.exists(self.args["--figdir"]):
os.makedirs(self.args["--figdir"])
uncut_thr, loop_thr = hcf.get_thresholds(
self.args["<input>"],
interactive=self.args["--interactive"],
plot_events=self.args["--plot"],
fig_path=figpath,
prefix=self.args["--prefix"],
)
# Filter library and write to output file
figpath = None
if self.args["--figdir"]:
figpath = join(self.args["--figdir"], "event_distribution.pdf")
hcf.filter_events(
self.args["<input>"],
self.args["<output>"],
uncut_thr,
loop_thr,
plot_events=self.args["--plot"],
fig_path=figpath,
prefix=self.args["--prefix"],
)
class View(AbstractCommand):
"""Contact map visualization command
Visualize a Hi-C matrix file as a heatmap of contact frequencies. Allows to
tune visualisation by binning and normalizing the matrix, and to save the
output image to disk. If no output is specified, the output is displayed.
usage:
view [--binning=1] [--despeckle] [--frags FILE] [--trim INT] [--n-mad 3.0] [--lines]
[--normalize] [--min=0] [--max=99%] [--output=IMG] [--cmap=Reds] [--dpi=300]
[--transform=STR] [--circular] [--region=STR] <contact_map> [<contact_map2>]
arguments:
contact_map Sparse contact matrix in bg2, cool or graal format
contact_map2 Sparse contact matrix in bg2, cool or graal format,
if given, the log ratio of contact_map/contact_map2
will be shown.
options:
-b, --binning=INT[bp|kb|Mb|Gb] Rebin the matrix. If no unit is given, bins will
be merged by groups of INT. If a unit is given,
bins of that size will be generated. [default: 1]
-c, --cmap=STR The name of a matplotlib colormap to
use for the matrix. [default: Reds]
-C, --circular Use if the genome is circular.
-d, --despeckle Remove sharp increases in long range
contact by averaging surrounding
values.
-D, --dpi=INT Map resolution in DPI (dots per inch). [default: 300]
-f, --frags=FILE Required for bp binning and chromosome lines.
Tab-separated file with headers, containing
fragments start position in the 3rd
column, as generated by hicstuff
pipeline.
-T, --transform=STR Apply a mathematical transformation to pixel values
to improve visibility of long range signals. Possible
values are: log2, log10, ln, sqrt, exp0.2.
-l, --lines Add dotted lines marking separation between chromosomes
or contigs. Requires --frags.
-M, --max=INT Saturation threshold. Maximum pixel
value is set to this number. Can be
followed by % to use a percentile of
nonzero pixels in the contact
map. [default: 99%]
-m, --min=INT Minimum of the colorscale, works
identically to --max. [default: 0]
-N, --n-mad=INT Number of median absolute deviations (MAD) from the median
of log bin sums allowed to keep bins in the normalization
procedure [default: 3.0].
-n, --normalize Should ICE normalization be performed
before rendering the matrix ?
-o, --output=FILE Name of the image file where the view is stored.
-r, --region=STR[;STR] Only view a region of the contact map.
Regions are specified as UCSC strings.
(e.g.:chr1:1000-12000). If only one
region is given, it is viewed on the
diagonal. If two regions are given,
The contacts between both are shown.
-t, --trim=INT Trims outlier rows/columns from the
matrix if the sum of their contacts
deviates from the mean by more than
INT standard deviations.
"""
def data_transform(self, dense_map, operation="log10"):
"""
Apply a mathematical operation on a dense Hi-C map. Valid
operations are: log2, log10, ln, sqrt, exp0.2
"""
ops = {
"log10": np.log10,
"log2": np.log2,
"ln": np.log,
"sqrt": np.sqrt,
}
if operation in ops:
return ops[operation](dense_map)
elif re.match(r"exp", operation):
splitop = operation.split("exp")
exp_val = float(splitop[1])
return dense_map ** exp_val
elif hasattr(np, operation) and callable(np.__dict__[operation]):
logger.warning("Using built-in numpy callable: %s", operation)
return np.__dict__[operation](dense_map)
else:
raise TypeError("Supplied transform function is not supported.")
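# For example (illustrative), data_transform(m, "sqrt") applies np.sqrt
# element-wise and data_transform(m, "exp0.2") returns m ** 0.2.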
def process_matrix(self, sparse_map):
"""
Performs any combination of binning, normalisation, log transformation,
trimming and subsetting based on the attributes of the instance class.
"""
# BINNING
if self.binning > 1:
if self.bp_unit:
self.pos = self.frags.iloc[:, 2]
binned_map, binned_pos = hcs.bin_bp_sparse(
M=sparse_map, positions=self.pos, bin_len=self.binning
)
# Get bin numbers of chromosome starts
binned_start = np.append(
np.where(binned_pos == 0)[0], len(binned_pos)
)
# Get bin length of each chromosome
num_binned = binned_start[1:] - binned_start[:-1]
# Get unique chromosome names without losing original order
# (numpy.unique sorts output)
chr_names_idx = np.unique(
self.frags.iloc[:, 1], return_index=True
)[1]
chr_names = [
self.frags.iloc[index, 1]
for index in sorted(chr_names_idx)
]
binned_chrom = np.repeat(chr_names, num_binned)
binned_frags = pd.DataFrame(
{"chrom": binned_chrom, "start_pos": binned_pos[:, 0]}
)
binned_frags["end_pos"] = binned_frags.groupby("chrom")[
"start_pos"
].shift(-1)
chrom_ends = self.frags.groupby("chrom").end_pos.max()
# Fill ends of chromosome bins with actual chromosome length
for cn in chrom_ends.index:
binned_frags.end_pos[
np.isnan(binned_frags.end_pos)
& (binned_frags.chrom == cn)
] = chrom_ends[cn]
else:
# Note this is a basic binning procedure, chromosomes are
# not taken into account -> last few fragments of a chrom
# are merged with the first few of the next
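# Illustrative consequence (fragment ids assumed): with subsampling_factor=3,
# the last two fragments of one chromosome and the first fragment of the next
# can be merged into a single bin.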
binned_map = hcs.bin_sparse(
M=sparse_map, subsampling_factor=self.binning
)
if self.frags:
binned_frags = self.frags.iloc[:: self.binning, :]
binned_frags = binned_frags.reset_index(drop=True)
# Since matrix binning ignores chromosomes, we
# have to do the same procedure with fragments
# we just correct the coordinates to start at 0
def shift_min(x):
try:
x[x == min(x)] = 0
except ValueError:
pass
return x
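# Illustrative example (coordinates assumed): retained start_pos values of
# [4000, 9000, 14000] for one chromosome become [0, 9000, 14000]; only the
# minimum is reset so that the chromosome starts at 0.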
binned_frags.start_pos = binned_frags.groupby(
"chrom", sort=False
).start_pos.apply(shift_min)
else:
binned_frags = self.frags
else:
binned_map = sparse_map
binned_frags = self.frags
# TRIMMING
if self.args["--trim"]:
try:
trim_std = float(self.args["--trim"])
except ValueError:
logger.error(
"You must specify a number of standard deviations for "
"trimming"
)
raise
binned_map = hcs.trim_sparse(binned_map, n_mad=trim_std)
# NORMALIZATION
if self.args["--normalize"]:
binned_map = hcs.normalize_sparse(
binned_map, norm="ICE", n_mad=float(self.args["--n-mad"])
)
# ZOOM REGION
if self.args["--region"]:
if self.args["--lines"]:
raise NotImplementedError(
"Chromosome lines are currently incompatible with a region zoom"
)
if self.frags is None:
logger.error(
"A fragment file must be provided to subset "
"genomic regions. See hicstuff view --help"
)
sys.exit(1)
# Load chromosomes and positions from fragments list
reg_pos = binned_frags[['chrom', 'start_pos']]
region = self.args["--region"]
if ";" in region:
# 2 input regions: zoom anywhere in matrix
self.symmetric = False
reg1, reg2 = region.split(";")
reg1 = parse_ucsc(reg1, reg_pos)
reg2 = parse_ucsc(reg2, reg_pos)
else:
# Only 1 input region: zoom on diagonal
region = parse_ucsc(region, reg_pos)
reg1 = reg2 = region
binned_map = binned_map.tocsr()
binned_map = binned_map[reg1[0] : reg1[1], reg2[0] : reg2[1]]
binned_map = binned_map.tocoo()
return binned_map, binned_frags
def execute(self):
input_map = self.args["<contact_map>"]
hic_fmt = hio.get_hic_format(input_map)
cmap = self.args["--cmap"]
# Switch to a divergent colormap for plotting ratios
if (
self.args["<contact_map2>"] is not None
and cmap not in DIVERGENT_CMAPS
):
# In case user specified a custom cmap incompatible with ratios
if cmap != "Reds":
logger.warning(
"You chose a non-divergent colormap. Valid divergent "
"cmaps are:\n\t{}".format(" ".join(DIVERGENT_CMAPS))
)
logger.info(
"Defaulting to seismic colormap for ratios. You can pick "
"another divergent colormap if you wish."
)
cmap = "seismic"
self.bp_unit = False
bin_str = self.args["--binning"].upper()
self.symmetric = True
transform = self.args["--transform"]
try:
# Subsample binning
self.binning = int(bin_str)
except ValueError:
if re.match(r"^[0-9]+[KMG]?B[P]?$", bin_str):
if hic_fmt == "graal" and not self.args["--frags"]:
logger.error(
"A fragment file must be provided to perform "
"basepair binning. See hicstuff view --help"
)
sys.exit(1)
# Load positions from fragments list
self.binning = parse_bin_str(bin_str)
self.bp_unit = True
else:
logger.error(
"Please provide an integer or basepair value for binning."
)
raise
sparse_map, self.frags, _ = hio.flexible_hic_loader(
input_map, fragments_file=self.args["--frags"], quiet=True
)
output_file = self.args["--output"]
processed_map, frags = self.process_matrix(sparse_map)
# If 2 matrices given compute log ratio
if self.args["<contact_map2>"]:
sparse_map2, _, _ = hio.flexible_hic_loader(
self.args["<contact_map2>"],
fragments_file=self.args["--frags"],
quiet=True,
)
processed_map2, _ = self.process_matrix(sparse_map2)
if sparse_map2.shape != sparse_map.shape:
logger.error(
"You cannot compute the ratio of matrices with "
"different dimensions"
)
# Get log of values for both maps
processed_map.data = np.log2(processed_map.data)
processed_map2.data = np.log2(processed_map2.data)
# Note: Taking diff of logs instead of log of ratio because sparse
# mat division yields dense matrix in current implementation.
# Changing base to 2 afterwards.
processed_map = processed_map.tocsr() - processed_map2.tocsr()
processed_map = processed_map.tocoo()
processed_map.data[np.isnan(processed_map.data)] = 0.0
# Log transformation done already
transform = False
if self.args["--despeckle"]:
processed_map = hcs.despeckle_simple(processed_map)
try:
if self.symmetric:
dense_map = hcv.sparse_to_dense(
processed_map, remove_diag=False
)
else:
dense_map = processed_map.toarray()
def set_v(v, mat):
if "%" in v:
try:
valid_pixels = (mat > 0) & (np.isfinite(mat))
val = np.percentile(
mat[valid_pixels], float(v.strip("%"))
)
# No nonzero / finite value
except IndexError:
val = 0
else:
val = float(v)
return val
dense_map = dense_map.astype(float)
self.vmax = set_v(self.args["--max"], dense_map)
self.vmin = set_v(self.args["--min"], dense_map)
if self.args["<contact_map2>"]:
self.vmin, self.vmax = -2, 2
# Log transform the map and the colorscale limits if needed
if transform:
dense_map = self.data_transform(dense_map, transform)
# self.vmin = np.percentile(dense_map[np.isfinite(dense_map)], 1)
# self.vmax = self.data_transform(self.vmax, transform)
self.vmax = set_v(self.args["--max"], dense_map)
self.vmin = set_v(self.args["--min"], dense_map)
else:
# Set 0 values in matrix to NA
dense_map[dense_map == 0] = np.inf
# Get chromosome coordinates if required
if self.args["--lines"]:
chrom_starts = np.where(np.diff(frags.start_pos) < 0)[0] + 1
else:
chrom_starts = None
# Display NA values in white
current_cmap = cm.get_cmap().copy()
current_cmap.set_bad(color=current_cmap(0))
hcv.plot_matrix(
dense_map,
filename=output_file,
vmin=self.vmin,
vmax=self.vmax,
dpi=int(self.args["--dpi"]),
cmap=cmap,
chrom_starts=chrom_starts,
)
except MemoryError:
logger.error("contact map is too large to load, try binning more")
class Pipeline(AbstractCommand):
"""Whole (end-to-end) contact map generation command
Entire Pipeline to process fastq files into a Hi-C matrix. Uses all the
individual components of hicstuff.
usage:
pipeline [--aligner=bowtie2] [--centromeres=FILE] [--circular] [--distance-law]
[--duplicates] [--enzyme=5000] [--filter] [--force] [--mapping=normal]
[--matfmt=graal] [--no-cleanup] [--outdir=DIR] [--plot] [--prefix=PREFIX]
[--quality-min=30] [--read-len=INT] [--remove-centromeres=0] [--size=0]
[--start-stage=fastq] [--threads=1] [--tmpdir=DIR] --genome=FILE <input1> [<input2>]
arguments:
input1: Forward fastq file, if start_stage is "fastq", sam
file for aligned forward reads if start_stage is
"bam", or a .pairs file if start_stage is "pairs".
input2: Reverse fastq file, if start_stage is "fastq", sam
file for aligned reverse reads if start_stage is
"bam", or nothing if start_stage is "pairs".
options:
-a, --aligner=STR Alignment software to use. Can be either
bowtie2, minimap2 or bwa. minimap2 should
only be used for reads > 100 bp.
[default: bowtie2]
-c, --centromeres=FILE Positions of the centromeres separated by
a space and in the same order as the
chromosomes. Incompatible with the circular
option.
-C, --circular Enable if the genome is circular.
Incompatible with the centromeres option.
-d, --distance-law If enabled, generates a distance law file
with the values of the probabilities to
have a contact between two distances for
each chromosomes or arms if the file with
the positions has been given. The values
are not normalized, or averaged.
-D, --duplicates Filter out PCR duplicates based on read
positions.
-e, --enzyme={STR|INT} Restriction enzyme if a string, or chunk
size (i.e. resolution) if a number. Can
also be multiple comma-separated enzymes.
[default: 5000]
-f, --filter Filter out spurious 3C events (loops and
uncuts) using hicstuff filter. Requires
"-e" to be a restriction enzyme, not a
chunk size. For more information, see
Cournac et al. BMC Genomics, 2012.
-F, --force Write even if the output file already exists.
-g, --genome=FILE Reference genome to map against. Path to
the bowtie2/bwa index if using bowtie2/bwa,
or to a FASTA file if using minimap2.
-m, --mapping=STR normal|iterative|cutsite. Parameter of
mapping. "normal": Directly map reads
without any process. "iterative": Map
reads iteratively using iteralign, by
truncating reads to 20bp and then
repeatedly extending to align them.
"cutsite": Cut reads at the religation
sites of the given enzyme using cutsite,
create new pairs of reads and then align
them; an enzyme is required [default: normal].
-M, --matfmt=STR The format of the output sparse matrix.
Can be "bg2" for 2D Bedgraph format,
"cool" for Mirnylab's cooler software, or
"graal" for graal-compatible plain text
COO format. [default: graal]
-n, --no-cleanup If enabled, intermediary BED files will
be kept after generating the contact map.
Disabled by default.
-o, --outdir=DIR Output directory. Defaults to the current
directory.
-p, --plot Generates plots in the output directory
at different steps of the pipeline.
-P, --prefix=STR Overrides default filenames and prefixes all
output files with a custom name.
-q, --quality-min=INT Minimum mapping quality for selecting
contacts. [default: 30].
-r, --remove-centromeres=INT Integer. Number of kb that will be removed around
the centromere positions given in the centromeres
file. [default: 0]
-R, --read-len=INT Maximum read length in the fastq file. Optionally
used in iterative alignment mode. Estimated from
the first read by default. Useful if input fastq
is a composite of different read lengths.
-s, --size=INT Minimum size threshold to consider
contigs. Keep all contigs by default.
[default: 0]
-S, --start-stage=STR Define the starting point of the pipeline
to skip some steps. Default is "fastq" to
run from the start. Can also be "bam" to
skip the alignment, "pairs" to start from a
single pairs file or "pairs_idx" to skip
fragment attribution and only build the
matrix. [default: fastq]
-t, --threads=INT Number of threads to allocate.
[default: 1].
-T, --tmpdir=DIR Directory for storing intermediary BED
files and temporary sort files. Defaults
to the output directory.
output:
abs_fragments_contacts_weighted.txt: the sparse contact map
fragments_list.txt: information about restriction fragments (or chunks)
info_contigs.txt: information about contigs or chromosomes
hicstuff.log: details and statistics about the run.
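example (illustrative; genome index, enzyme and fastq paths are placeholders):
pipeline -e DpnII -g genome/bt2_index -o out_dir -m cutsite -t 8 lib_R1.fq.gz lib_R2.fq.gz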
"""
def execute(self):
if self.args["--filter"] and self.args["--enzyme"].isdigit():
raise ValueError(
"You cannot filter without specifying a restriction enzyme."
)
if not self.args["--outdir"]:
self.args["--outdir"] = os.getcwd()
if self.args["--matfmt"] not in ("graal", "bg2", "cool"):
logger.error("matfmt must be either bg2, cool or graal.")
raise ValueError
read_len = self.args["--read-len"]
if read_len is not None:
read_len = int(read_len)
hpi.full_pipeline(
genome=self.args["--genome"],
input1=self.args["<input1>"],
input2=self.args["<input2>"],
aligner=self.args["--aligner"],
centromeres=self.args["--centromeres"],
circular=self.args["--circular"],
distance_law=self.args["--distance-law"],
enzyme=self.args["--enzyme"],
filter_events=self.args["--filter"],
force=self.args["--force"],
mapping=self.args["--mapping"],
mat_fmt=self.args["--matfmt"],
min_qual=int(self.args["--quality-min"]),
min_size=int(self.args["--size"]),
no_cleanup=self.args["--no-cleanup"],
out_dir=self.args["--outdir"],
pcr_duplicates=self.args["--duplicates"],
plot=self.args["--plot"],
prefix=self.args["--prefix"],
read_len=read_len,
remove_centros=self.args["--remove-centromeres"],
start_stage=self.args["--start-stage"],
threads=int(self.args["--threads"]),
tmp_dir=self.args["--tmpdir"],
)
class Scalogram(AbstractCommand):
"""
Generate a scalogram.
usage:
scalogram [--cmap=viridis] [--centromeres=FILE] [--frags=FILE] [--range=INT-INT]
[--threads=1] [--output=FILE] [--normalize]
[--indices=INT-INT] [--despeckle] <contact_map>
argument:
<contact_map> The sparse Hi-C contact matrix.
options:
-C, --cmap=STR The matplotlib colormap to use for
the plot. [default: viridis]
-d, --despeckle Remove speckles (artifactual spots)
from the matrix.
-f, --frags=FILE Fragments_list.txt file providing mapping
between genomic coordinates and bin IDs.
-i, --indices=INT-INT The range of bin numbers of the matrix to
use for the plot. Can also be given in
UCSC style genomic coordinates (requires -f).
E.g. chr1:1Mb-10Mb.
-o, --output=FILE Output file where the plot should be
saved. Plot is only displayed by
default.
-n, --normalize Normalize the matrix first.
-r, --range=INT-INT The range of contact distance to look
at. No limit by default. Values in
basepairs by default but a unit can
be specified (kb, Mb, ...).
-t, --threads=INT Parallel processes to run in for
despeckling. [default: 1]
"""
def execute(self):
mat, frags, _ = hio.flexible_hic_loader(
self.args["<contact_map>"], fragments_file=self.args["--frags"]
)
if frags is not None:
# If fragments_list.txt is provided, load chrom start and end columns
frags = pd.read_csv(
self.args["--frags"], delimiter="\t", usecols=(1, 2, 3)
)
if self.args["--range"]:
shortest, longest = self.args["--range"].split("-")
# If range given in number of bins
try:
shortest, longest = int(shortest), int(longest)
# If range given in genomic scale
except ValueError:
shortest, longest = (
parse_bin_str(shortest),
parse_bin_str(longest),
)
# Use average bin size to convert genomic scale to number of bins
avg_res = (frags.end_pos - frags.start_pos).mean()
shortest, longest = (
int(shortest // avg_res),
int(longest // avg_res),
)
if self.args["--indices"]:
start, end = self.args["--indices"].split("-")
# If given in bin numbers
try:
start = int(start)
end = int(end)
# If given in genomic coordinates
except ValueError:
start, end = parse_ucsc(
self.args["--indices"],
frags.loc[:, ["chrom", "start_pos"]],
)
output_file = self.args["--output"]
# good_bins = np.array(range(S.shape[0]))
S = mat.tocsr()
if not self.args["--range"]:
shortest = 0
longest = S.shape[0]
if self.args["--normalize"]:
# good_bins = np.where(hcs.get_good_bins(S, n_std=3) == 1)[0]
S = hcs.normalize_sparse(S, norm="ICE")
S = S.tocsr()
if self.args["--despeckle"]:
S = hcs.despeckle_simple(S, threads=int(self.args["--threads"]))
# Cropping matrix before transforming to dense to reduce memory overhead
# Note we leave a margin equal to longest range so that all windows can be computed
if self.args["--indices"]:
crop_inf, crop_sup = (
max(0, start - longest),
min(S.shape[0], end + longest),
)
crop_later = longest
S = S[crop_inf:crop_sup, crop_inf:crop_sup]
else:
crop_later = 0
D = hcv.sparse_to_dense(S)
D = np.fliplr(np.rot90(hcs.scalogram(D), k=-1))
# Crop the margin left previously to get actual indices on dimension 0
# and focus scale to --range on dimension 1
plt.contourf(
D[crop_later : D.shape[1] - crop_later, shortest:longest],
cmap=self.args["--cmap"],
)
if output_file:
plt.savefig(output_file)
else:
plt.show()
class Rebin(AbstractCommand):
"""
Rebins a Hi-C matrix and modifies its fragment and chrom files accordingly.
Output files are in the same format as the input files (cool, graal or bg2).
usage:
rebin [--binning=1] [--frags=FILE] [--force] [--chroms=FILE] <contact_map> <out_prefix>
arguments:
contact_map Sparse contact matrix in graal, cool or bg2 format.
out_prefix Prefix path (without extension) for the output files.
options:
-b, --binning=INT[bp|kb|Mb|Gb] Subsampling factor or fix value in
basepairs to use for binning
[default: 1].
-f, --frags=FILE Tab-separated file with headers,
containing fragments start position in
the 3rd column. This is the file
"fragments_list.txt" generated by
hicstuff pipeline. Required for graal
matrices and recommended for bg2.
-F, --force Write even if the output file already exists.
-c, --chroms=FILE Tab-separated with headers, containing
chromosome names, size, number of
restriction fragments. This is the file
"info_contigs.txt" generated by hicstuff
pipeline.
"""
def execute(self):
prefix = self.args["<out_prefix>"]
bin_str = self.args["--binning"].upper()
hic_fmt = hio.get_hic_format(self.args["<contact_map>"])
# Get complete output filename and prevent overwriting unless --force is enabled
out_name = prefix + self.fmt2ext[hic_fmt]
self.check_output_path(out_name, force=self.args["--force"])
# Load positions from fragments list and chromosomes from chrom file
map_path = self.args["<contact_map>"]
hic_map, frags, chromlist = hio.flexible_hic_loader(
map_path,
fragments_file=self.args["--frags"],
chroms_file=self.args["--chroms"],
)
if hic_fmt == "graal" and (frags is None or chromlist is None):
raise ValueError(
"You must provide a chroms file and a fragments file "
"when rebinning a matrix in graal format. (hint: the "
"files info_contigs.txt and fragments_list.txt)"
)
# Create output directory if it does not exist
if dirname(prefix):
os.makedirs(dirname(prefix), exist_ok=True)
bp_unit = False
try:
# Subsample binning
binning = int(bin_str)
except ValueError:
# Basepair binning: determine bin size
if re.match(r"^[0-9]+[KMG]?B[P]?$", bin_str):
binning = parse_bin_str(bin_str)
bp_unit = True
else:
logger.error(
"Please provide an integer or basepair value for binning."
)
raise
chromnames = np.unique(frags.chrom)
if bp_unit:
# Basepair binning: Perform binning
hic_map, _ = hcs.bin_bp_sparse(hic_map, frags.start_pos, binning)
for chrom in chromnames:
chrom_mask = frags.chrom == chrom
# For all chromosomes, get new bin start positions
bin_id = frags.loc[chrom_mask, "start_pos"] // binning
frags.loc[chrom_mask, "id"] = bin_id + 1
frags.loc[chrom_mask, "start_pos"] = binning * bin_id
bin_ends = binning * bin_id + binning
# Do not allow bin ends to be larger than chrom size
try:
chromsize = chromlist.length[
chromlist.contig == chrom
].values[0]
except AttributeError:
chromsize = chromlist["length_kb"][
chromlist.contig == chrom
].values[0]
bin_ends[bin_ends > chromsize] = chromsize
frags.loc[frags.chrom == chrom, "end_pos"] = bin_ends
# Account for special cases where restriction fragments are larger than
# bin size, resulting in missing bins (i.e. jumps in bin ids)
id_diff = (
np.array(frags.loc[:, "id"])[1:]
- np.array(frags.loc[:, "id"])[:-1]
)
# Normal jump is 1, new chromosome (reset id) is < 0, abnormal is > 1
# Get panda indices of abnormal jumps
jump_frag_idx = np.where(id_diff > 1)[0]
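# Illustrative example (ids assumed): for bin ids [1, 2, 5, 1, 2] spanning two
# chromosomes, id_diff is [1, 3, -4, 1]; the value 3 flags two missing bins
# after the second fragment, while -4 is a chromosome reset and is ignored.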
add_bins = id_diff - 1
# Need to insert [jump] bins after indices with abnormal [jump]
miss_bins = [None] * np.sum(add_bins[jump_frag_idx])
miss_bin_id = 0
for idx in jump_frag_idx:
jump_size = add_bins[idx]
for j in range(1, jump_size + 1):
# New generated bins will be given attributes based on the previous bin
# e.g. if 2 missing bins between bins 2 and 5:
# id[3] = id[2] + 1 * 1 and id[4] = id[2] + 1 * 2
miss_bins[miss_bin_id] = {
"id": frags.loc[idx, "id"] + 1 * j,
"chrom": frags.loc[idx, "chrom"],
"start_pos": frags.loc[idx, "start_pos"] + binning * j,
"end_pos": frags.loc[idx, "end_pos"] + binning * j,
"size": binning,
"gc_content": np.NaN,
}
miss_bin_id += 1
# Give existing bins spaced row idx to allow inserting missing bins
idx_shift = copy.copy(id_diff)
idx_shift[idx_shift < 1] = 1
existing_bins_idx = np.cumsum(idx_shift)
# Prepend first bin (lost when computing diff)
existing_bins_idx = np.insert(existing_bins_idx, 0, 0)
# Add missing bins to original table, and sort by idx
# missing bins are "holes" in the continuous range of existing bins
missing_bins_idx = sorted(
set(range(existing_bins_idx[0], existing_bins_idx[-1]))
- set(existing_bins_idx)
)
miss_bins_df = pd.DataFrame(
miss_bins, columns=frags.columns, index=missing_bins_idx
)
frags["tmp_idx"] = existing_bins_idx
miss_bins_df["tmp_idx"] = missing_bins_idx
frags = pd.concat([frags, miss_bins_df], axis=0, sort=False)
frags.sort_values("tmp_idx", axis=0, inplace=True)
frags.drop("tmp_idx", axis=1, inplace=True)
else:
# Subsample binning
hic_map = hcs.bin_sparse(hic_map, binning)
# Use index for binning, but keep 1-indexed.
# Exception when binning is 1 (no binning) where no need to shift
shift_id = 0 if binning == 1 else 1
frags.id = (frags.id // binning) + shift_id
# Save original columns order
col_ordered = list(frags.columns)
# Get new start and end position for each bin
frags = frags.groupby(["chrom", "id"], sort=False)
positions = frags.agg({"start_pos": "min", "end_pos": "max"})
positions.reset_index(inplace=True)
# Compute mean for all added features in each index bin
# Normally only other feature is GC content
try:
features = frags.agg("mean")
features.reset_index(inplace=True)
# set new bins positions
frags = features
frags["start_pos"] = 0
frags["end_pos"] = 0
frags.loc[:, positions.columns] = positions
except pd.core.base.DataError:
frags = positions
frags["size"] = frags.end_pos - frags.start_pos
cumul_bins = 0
for chrom in chromnames:
n_bins = frags.start_pos[frags.chrom == chrom].shape[0]
chromlist.loc[chromlist.contig == chrom, "n_frags"] = n_bins
chromlist.loc[
chromlist.contig == chrom, "cumul_length"
] = cumul_bins
cumul_bins += n_bins
# Keep original column order
frags = frags.reindex(columns=col_ordered)
# Write 3 binned output files
hio.flexible_hic_saver(
hic_map,
self.args["<out_prefix>"],
frags=frags,
chroms=chromlist,
hic_fmt=hic_fmt,
)
class Subsample(AbstractCommand):
"""
Subsample contacts from a Hi-C matrix. Probability of sampling is proportional
to the intensity of the bin.
usage:
subsample [--prop=0.1] [--force] <contact_map> <subsampled_prefix>
arguments:
contact_map Sparse contact matrix in graal, bg2 or cool format.
subsampled_prefix Path without extension to the output map in the same
format as the input containing only a fraction of the
contacts.
options:
-p, --prop=FLOAT Proportion of contacts to sample from the input matrix
if between 0 and 1. Raw number of contacts to keep if
superior to 1. [default: 0.1]
-F, --force Write even if the output file already exists.
"""
def execute(self):
hic_fmt = hio.get_hic_format(self.args["<contact_map>"])
prefix = self.args["<subsampled_prefix>"]
# Get complete output filename and prevent overwriting unless --force is enabled
out_name = prefix + self.fmt2ext[hic_fmt]
self.check_output_path(out_name, force=self.args["--force"])
mat, frags, _ = hio.flexible_hic_loader(
self.args["<contact_map>"], quiet=True
)
subsampled = hcs.subsample_contacts(mat, float(self.args["--prop"]))
subsampled = subsampled.tocoo()
hio.flexible_hic_saver(
subsampled,
prefix,
frags=frags,
hic_fmt=hic_fmt,
quiet=True,
)
class Convert(AbstractCommand):
"""
Convert between different Hi-C data formats. Currently supports tsv (graal),
bedgraph2D (bg2) and cooler (cool). Input format is automatically inferred.
usage:
convert [--frags=FILE] [--chroms=FILE] [--force] [--genome=FILE]
[--to=cool] <contact_map> <prefix>
arguments:
contact_map The file containing the contact frequencies.
prefix The prefix path for output files. An extension
will be added to the files depending on the
output format.
options:
-f, --frags=FILE Tab-separated file with headers,
containing columns id, chrom, start_pos,
end_pos size. This is the file
"fragments_list.txt" generated by
hicstuff pipeline. Required for graal
matrices and recommended for bg2.
-F, --force Write even if the output file already exists.
-g, --genome=FILE Optional genome file used to add a GC content
column to the fragments table. This is
required to generate instagraal-compatible
files.
-c, --chroms=FILE Tab-separated with headers, containing
columns contig, length, n_frags, cumul_length.
This is the file "info_contigs.txt" generated
by hicstuff pipeline.
-T, --to=STR The format to which files should be
converted. [default: cool]
"""
def execute(self):
out_fmt = self.args["--to"]
mat_path = self.args["<contact_map>"]
frags_path = self.args["--frags"]
genome_path = self.args["--genome"]
chroms_path = self.args["--chroms"]
prefix = self.args["<prefix>"]
# Get complete output filename and prevent overwriting unless --force is enabled
out_name = prefix + self.fmt2ext[out_fmt]
self.check_output_path(out_name, force=self.args["--force"])
# Load
mat, frags, chroms = hio.flexible_hic_loader(
mat_path,
fragments_file=frags_path,
chroms_file=chroms_path,
quiet=True,
)
# Modify fragments for instagraal compatibility
# Add fragments size column
chrom_col, start_col, end_col = hio.get_pos_cols(frags)
size = frags[end_col] - frags[start_col]
if "size" not in frags.columns:
frags = frags.join(pd.DataFrame({"size": size}))
#############################################################
# ActivitySim verification against TM1
# <NAME>, <EMAIL>, 02/22/19
# C:\projects\activitysim\verification>python compare_results.py
#############################################################
import pandas as pd
import openmatrix as omx
#############################################################
# INPUTS
#############################################################
pipeline_filename = 'asim/pipeline.h5'
distance_matrix_filename = "asim/skims.omx"
asim_nmtf_alts_filename = "asim/non_mandatory_tour_frequency_alternatives.csv"
process_sp = True # False skip work/sch shadow pricing comparisons, True do them
process_tm1 = True # False only processes asim, True processes tm1 as well
asim_sp_work_filename = "asim/shadow_price_workplace_modeled_size_10.csv"
asim_sp_school_filename = "asim/shadow_price_school_modeled_size_10.csv"
asim_sp_school_no_sp_filename = "asim/shadow_price_school_modeled_size_1.csv"
tm1_access_filename = "tm1/accessibility.csv"
tm1_sp_filename = "tm1/ShadowPricing_9.csv"
tm1_work_filename = "tm1/wsLocResults_1.csv"
tm1_ao_filename = "tm1/aoResults.csv"
tm1_hh_filename = "tm1/householdData_1.csv"
tm1_cdap_filename = "tm1/cdapResults.csv"
tm1_per_filename = "tm1/personData_1.csv"
tm1_tour_filename = "tm1/indivTourData_1.csv"
tm1_jtour_filename = "tm1/jointTourData_1.csv"
tm1_trips_filename = "tm1/indivTripData_1.csv"
tm1_jtrips_filename = "tm1/jointTripData_1.csv"
#############################################################
# OUTPUT FILES FOR DEBUGGING
#############################################################
asim_zones_filename = "asim/asim_zones.csv"
asim_access_filename = "asim/asim_access.csv"
asim_per_filename = "asim/asim_per.csv"
asim_hh_filename = "asim/asim_hh.csv"
asim_tour_filename = "asim/asim_tours.csv"
asim_trips_filename = "asim/asim_trips.csv"
#############################################################
# COMMON LABELS
#############################################################
ptypes = ["", "Full-time worker", "Part-time worker", "University student", "Non-worker",
"Retired", "Student of driving age", "Student of non-driving age",
"Child too young for school"]
mode_labels = ["", "DRIVEALONEFREE", "DRIVEALONEPAY", "SHARED2FREE", "SHARED2PAY", "SHARED3FREE",
"SHARED3PAY", "WALK", "BIKE", "WALK_LOC", "WALK_LRF", "WALK_EXP", "WALK_HVY",
"WALK_COM", "DRIVE_LOC", "DRIVE_LRF", "DRIVE_EXP", "DRIVE_HVY", "DRIVE_COM"]
#############################################################
# DISTANCE SKIM
#############################################################
# read distance matrix (DIST)
distmat = omx.open_file(distance_matrix_filename)["DIST"][:]
#############################################################
# EXPORT TABLES
#############################################################
# write tables for verification
tazs = pd.read_hdf(pipeline_filename, "land_use/initialize_landuse")
tazs["zone"] = tazs.index
tazs.to_csv(asim_zones_filename, index=False)
access = pd.read_hdf(pipeline_filename, "accessibility/compute_accessibility")
access.to_csv(asim_access_filename, index=False)
hh = pd.read_hdf(pipeline_filename, "households/joint_tour_frequency")
hh["household_id"] = hh.index
hh.to_csv(asim_hh_filename, index=False)
per = pd.read_hdf(pipeline_filename, "persons/non_mandatory_tour_frequency")
per["person_id"] = per.index
per.to_csv(asim_per_filename, index=False)
tours = pd.read_hdf(pipeline_filename, "tours/stop_frequency")
tours["tour_id"] = tours.index
tours.to_csv(asim_tour_filename, index=False)
trips = pd.read_hdf(pipeline_filename, "trips/trip_mode_choice")
trips["trip_id"] = trips.index
trips.to_csv(asim_trips_filename, index=False)
#############################################################
# AGGREGATE
#############################################################
# accessibilities
if process_tm1:
tm1_access = pd.read_csv(tm1_access_filename)
tm1_access.to_csv("outputs/tm1_access.csv", na_rep=0)
asim_access = pd.read_csv(asim_access_filename)
asim_access.to_csv("outputs/asim_access.csv", na_rep=0)
#############################################################
# HOUSEHOLD AND PERSON
#############################################################
# work and school location
if process_sp:
if process_tm1:
tm1_markets = ["work_low", "work_med", "work_high", "work_high", "work_very high", "university",
"school_high", "school_grade"]
tm1 = pd.read_csv(tm1_sp_filename)
tm1 = tm1.groupby(tm1["zone"]).sum()
tm1["zone"] = tm1.index
tm1 = tm1.loc[tm1["zone"] > 0]
ws_size = tm1[["zone"]]
for i in range(len(tm1_markets)):
ws_size[tm1_markets[i] + "_modeledDests"] = tm1[tm1_markets[i] + "_modeledDests"]
ws_size.to_csv("outputs/tm1_work_school_location.csv", na_rep=0)
asim_markets = ["work_low", "work_med", "work_high", "work_high", "work_veryhigh", "university",
"highschool", "gradeschool"]
asim = pd.read_csv(asim_sp_work_filename)
asim_sch = pd.read_csv(asim_sp_school_filename)
asim_sch_no_sp = pd.read_csv(asim_sp_school_no_sp_filename)
asim_sch["gradeschool"] = asim_sch_no_sp["gradeschool"] # grade school not shadow priced
asim = asim.set_index("TAZ", drop=False)
asim_sch = asim_sch.set_index("TAZ", drop=False)
asim["gradeschool"] = asim_sch["gradeschool"].loc[asim["TAZ"]].tolist()
asim["highschool"] = asim_sch["highschool"].loc[asim["TAZ"]].tolist()
asim["university"] = asim_sch["university"].loc[asim["TAZ"]].tolist()
ws_size = asim[["TAZ"]]
for i in range(len(asim_markets)):
ws_size[asim_markets[i] + "_asim"] = asim[asim_markets[i]]
ws_size.to_csv("outputs/asim_work_school_location.csv", na_rep=0)
# work county to county flows
tazs = pd.read_csv(asim_zones_filename)
counties = ["", "SF", "SM", "SC", "ALA", "CC", "SOL", "NAP", "SON", "MAR"]
tazs["COUNTYNAME"] = pd.Series(counties)[tazs["county_id"].tolist()].tolist()
tazs = tazs.set_index("zone", drop=False)
if process_tm1:
tm1_work = pd.read_csv(tm1_work_filename)
tm1_work["HomeCounty"] = tazs["COUNTYNAME"].loc[tm1_work["HomeTAZ"]].tolist()
tm1_work["WorkCounty"] = tazs["COUNTYNAME"].loc[tm1_work["WorkLocation"]].tolist()
tm1_work_counties = tm1_work.groupby(["HomeCounty", "WorkCounty"]).count()["HHID"]
tm1_work_counties = tm1_work_counties.reset_index()
tm1_work_counties = tm1_work_counties.pivot(index="HomeCounty", columns="WorkCounty")
tm1_work_counties.to_csv("outputs/tm1_work_counties.csv", na_rep=0)
asim_cdap = pd.read_csv(asim_per_filename)
asim_cdap["HomeCounty"] = tazs["COUNTYNAME"].loc[asim_cdap["home_taz"]].tolist()
asim_cdap["WorkCounty"] = tazs["COUNTYNAME"].loc[asim_cdap["workplace_zone_id"]].tolist()
asim_work_counties = asim_cdap.groupby(["HomeCounty", "WorkCounty"]).count()["household_id"]
asim_work_counties = asim_work_counties.reset_index()
asim_work_counties = asim_work_counties.pivot(index="HomeCounty", columns="WorkCounty")
asim_work_counties.to_csv("outputs/asim_work_counties.csv", na_rep=0)
# auto ownership - count of hhs by num autos by taz
if process_tm1:
tm1_ao = pd.read_csv(tm1_ao_filename)
tm1_hh = pd.read_csv(tm1_hh_filename)
tm1_ao = tm1_ao.set_index("HHID", drop=False)
tm1_hh["ao"] = tm1_ao["AO"].loc[tm1_hh["hh_id"]].tolist()
tm1_autos = tm1_hh.groupby(["taz", "ao"]).count()["hh_id"]
tm1_autos = tm1_autos.reset_index()
tm1_autos = tm1_autos.pivot(index="taz", columns="ao")
tm1_autos.to_csv("outputs/tm1_autos.csv", na_rep=0)
asim_ao = pd.read_csv(asim_hh_filename)
asim_autos = asim_ao.groupby(["TAZ", "auto_ownership"]).count()["SERIALNO"]
asim_autos = asim_autos.reset_index()
asim_autos = asim_autos.pivot(index="TAZ", columns="auto_ownership")
asim_autos.to_csv("outputs/asim_autos.csv", na_rep=0)
# cdap - ptype count and ptype by M,N,H
if process_tm1:
tm1_cdap = pd.read_csv(tm1_cdap_filename)
tm1_cdap_sum = tm1_cdap.groupby(["PersonType", "ActivityString"]).count()["HHID"]
tm1_cdap_sum = tm1_cdap_sum.reset_index()
tm1_cdap_sum = tm1_cdap_sum.pivot(index="PersonType", columns="ActivityString")
tm1_cdap_sum.to_csv("outputs/tm1_cdap.csv", na_rep=0)
asim_cdap = pd.read_csv(asim_per_filename)
asim_cdap_sum = asim_cdap.groupby(["ptype", "cdap_activity"]).count()["household_id"]
asim_cdap_sum = asim_cdap_sum.reset_index()
asim_cdap_sum = asim_cdap_sum.pivot(index="ptype", columns="cdap_activity")
asim_cdap_sum.to_csv("outputs/asim_cdap.csv", na_rep=0)
# free parking by ptype
if process_tm1:
tm1_per = pd.read_csv(tm1_per_filename)
tm1_per["fp_choice"] = (tm1_per["fp_choice"] == 1) # 1=free, 2==pay
tm1_work = pd.read_csv(tm1_work_filename)
tm1_work = tm1_work.set_index("PersonID", drop=False)
tm1_per["WorkLocation"] = tm1_work["WorkLocation"].loc[tm1_per["person_id"]].tolist()
tm1_fp = tm1_per[tm1_per["WorkLocation"] > 0]
tm1_fp = tm1_fp.groupby(["type", "fp_choice"]).count()["hh_id"]
tm1_fp = tm1_fp.reset_index()
tm1_fp = tm1_fp.pivot(index="type", columns="fp_choice")
tm1_fp.to_csv("outputs/tm1_fp.csv", na_rep=0)
asim_cdap["ptypename"] = pd.Series(ptypes)[asim_cdap["ptype"].tolist()].tolist()
asim_fp = asim_cdap.groupby(["ptypename", "free_parking_at_work"]).count()["household_id"]
asim_fp = asim_fp.reset_index()
asim_fp = asim_fp.pivot(index="ptypename", columns="free_parking_at_work")
asim_fp.to_csv("outputs/asim_fp.csv", na_rep=0)
# value of time
if process_tm1:
    tm1_per = pd.read_csv(tm1_per_filename)
import sys
import time
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from glob import glob
from sklearn.neural_network import MLPClassifier
def load_data(file_name):
    print('FILE EXISTS')
featuresDF = pd.read_csv(file_name, sep=';', dtype={'STUDENT': str})
return featuresDF
def test_classifier(clf_name, clf, X_train, y_train, X_test, y_test):
clf.fit(X_train, y_train)
cv_results = pd.DataFrame(clf.cv_results_)
cv_results.to_csv('./results_5/cv_results.csv', sep=';', float_format='%.4f')
y_pred = clf.predict(X_test)
print(clf_name)
print(accuracy_score(y_test, y_pred))
return accuracy_score(y_test, y_pred)
def cross_validate(clf, X, y, features):
group_kfold = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=5)
cv_scores = [clf.fit(X[train], y[train]).score(X[test], y[test])
for train, test in group_kfold.split(X, y, features['FILE'])]
return cv_scores
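# Note: splitting with groups=features['FILE'] keeps every frame of a given
# recording on the same side of the train/test split, so scores are not
# inflated by near-duplicate frames from the same file.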
if __name__ == '__main__':
idx = int(sys.argv[1])
feature_path = '../mfcc_data_19c'
feature_file = sorted(glob(feature_path + '/*.csv'))
feature_file_sorted = sorted(feature_file, key=lambda x: int(x.split('MFCC_')[1].split('.csv')[0]))
print(feature_file_sorted[idx])
feature_file = feature_file_sorted[idx]
features = load_data(feature_file)
no_mfcc = feature_file.split('\\')[-1].strip('.csv').split('_')[-1]
results_file = 'resultsMFCC_{}.csv'.format(no_mfcc)
print(results_file)
results = pd.DataFrame(columns=['No_MFCC', 'Classifier', 'Accuracy'])
# create design matrix X and target vector y
X = features.filter(like='MFCC').values
y = features['LABEL_GROUP'].values
sss = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=1)
sss.get_n_splits(X, y, features['FILE'])
for train_index, test_index in sss.split(X, y, features['FILE']):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
        print(pd.DataFrame(y_train))
import pandas as pd
import scipy.io as sio
import scipy.interpolate
import numpy as np
import scipy.sparse
import scipy
import gzip
import subprocess
import collections
from collections import defaultdict, Counter
import scipy.sparse as sp_sparse
import warnings
import pickle
import os
#warnings.filterwarnings('ignore')
import matplotlib
matplotlib.use('Agg')
import pylab as plt
fsize=12
rc_dict = dict(zip(list('NACGT'),list('NTGCA')))
def reverse_complement(seq):
return ''.join([rc_dict[s] for s in seq][::-1])
def generate_dge_matrix(df,read_cutoff=10):
reads_per_cell = df.groupby(df.cell_barcode).size()
cells = reads_per_cell[reads_per_cell>3]
all_genes = pd.Series(df.gene.unique()).sort_values()
all_genes.index = range(len(all_genes))
gene_dict = dict(zip(all_genes.values,range(len(all_genes.values))))
cell_dict = dict(zip(cells.index.values,range(len(cells.index.values))))
rows,cols,vals = [],[],[]
for bc,g in zip(df.cell_barcode.values,df.gene.values):
try:
cell_dict[bc]
except:
pass
else:
rows.append(cell_dict[bc])
cols.append(gene_dict[g])
vals.append(1)
rows.append(len(cell_dict)-1)
cols.append(len(gene_dict)-1)
vals.append(0)
digital_count_matrix = scipy.sparse.csr_matrix((vals,(rows,cols)),dtype=np.float64)
thresholded_cells = np.array(digital_count_matrix.sum(1)).flatten()>read_cutoff
digital_count_matrix = digital_count_matrix[thresholded_cells,:]
expressed_genes = np.array(digital_count_matrix.sum(0)).flatten()>0
all_genes = pd.Series(all_genes[expressed_genes])
digital_count_matrix = digital_count_matrix[:,expressed_genes]
barcodes = cells.index.values[thresholded_cells]
return digital_count_matrix,all_genes,barcodes
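# Usage sketch for the helper above (values are illustrative; `df` is expected
# to hold one row per molecule with `cell_barcode` and `gene` columns):
#   counts, genes, barcodes = generate_dge_matrix(df, read_cutoff=100)
#   print(counts.shape, len(genes), len(barcodes))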
def barnyard(cell_data,tickstep=10000,s=4,lim=None,ax=None,fig=None):
species = cell_data.columns[:2]
colors = [(0.8941176470588236, 0.10196078431372549, 0.10980392156862745),
(0.21568627450980393, 0.49411764705882355, 0.7215686274509804),
'gray']
#colors = list(sb.color_palette('Set1',n_colors=2)) + ['gray']
#sb.set_style("white")
#sb.set_style("ticks")
if ax is None:
fig = figure(figsize=(3,3))
ax = fig.add_subplot(111)
counts1 = cell_data.iloc[:,0]
counts2 = cell_data.iloc[:,1]
cell_type1 = counts1>(counts2*9)
cell_type2 = counts2>(counts1*9)
mixed_cells = ~(cell_type1|cell_type2)
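    # A barcode is called species-specific only if >90% of its UMIs come from a
    # single genome (counts1 > 9*counts2 or vice versa); everything else is "mixed".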
plt.scatter(counts1[mixed_cells],
counts2[mixed_cells],
color=colors[2],
s=s,
label=None)
plt.scatter(counts1[cell_type2],
counts2[cell_type2],
color=colors[0],
s=s,
alpha=1,
label=None)
plt.scatter(counts1[cell_type1],
counts2[cell_type1],
color=colors[1],
s=s,
label=None)
plt.scatter([],[],
color=colors[0],
s=10,
label='%d %s (%0.1f'%(sum(cell_type2),species[1],100*float(sum(cell_type2))/len(cell_type2))+'%)',
)
plt.scatter([],[],
color=colors[1],
label='%d %s (%0.1f'%(sum(cell_type1),species[0],100*float(sum(cell_type1))/len(cell_type1))+'%)',
s=10)
plt.scatter([],[],
color=colors[2],
label='%d Mixed (%0.1f'%(sum(mixed_cells),100*float(sum(mixed_cells))/len(mixed_cells))+'%)',
s=10)
    if lim is None:
lim = int((counts1+counts2).max()*1.1)
ax.set_xticks(plt.arange(0,lim,tickstep))
ax.set_yticks(plt.arange(0,lim,tickstep))
ax.set_xticklabels(plt.arange(0,lim,tickstep),rotation=90)
ax.axis([-int(lim/30.),lim,-int(lim/30.),lim])
ax.set_xlabel('%s UMI Counts' %species[0],fontsize=fsize)
ax.set_ylabel('%s UMI Counts' %species[1],fontsize=fsize)
ax.tick_params(labelsize=fsize)
ax.yaxis.tick_left()
ax.xaxis.tick_bottom()
ax.legend(fontsize=fsize-1,handletextpad=0.025)
if fig is None:
return 0
else:
return fig,ax
def get_read_threshold(raw_counts, frac_low=0.3, frac_high=0.925, cutoff=100, try_iter=10,window = 20):
#raw_counts = raw_counts[raw_counts>10]
counts = raw_counts.copy()
counts = counts[counts>cutoff]
#print(len(raw_counts),len(counts))
x = np.log10(np.arange(1,len(counts)+1))
y = np.log10(counts).values
f = scipy.interpolate.interp1d(x, y,kind='linear') # smooth the umi-barcode curve
x_hat = np.linspace(x.min(),x.max(),500)
y_hat = f(x_hat)
y_hat = pd.Series(index=x_hat,data=y_hat)
    y_hat_prime = (-y_hat).diff(window).iloc[window:].values  # drop in log-UMIs between barcodes `window` ranks apart
    threshold = 10**y_hat.iloc[np.argmax(y_hat_prime)]*0.5  # knee point: steepest drop on the rank-UMI curve, scaled by 0.5
fraction = sum(raw_counts[raw_counts>threshold])/sum(raw_counts)
#filtered_cells = sum(read_counts>threshold)
i=1
while fraction<frac_low or fraction>frac_high: #filtered_cells>2000:
if i>try_iter:
threshold = cutoff
break
threshold = 10**y_hat.iloc[y_hat_prime.argsort()[::-1][i]]#*0.5
fraction = sum(raw_counts[raw_counts>threshold])/sum(raw_counts)
#print(threshold, fraction)
#filtered_cells = sum(read_counts>threshold)
i+=1
fraction = sum(raw_counts[raw_counts>threshold])/sum(raw_counts)
#print(fraction,threshold)
print(fraction, threshold)
return threshold
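# Minimal sketch of the knee-point threshold above, on synthetic counts
# (purely illustrative, not real data):
#   raw = pd.Series(np.sort(np.random.lognormal(4, 1.5, 20000))[::-1])
#   thresh = get_read_threshold(raw, cutoff=10)
#   n_cells = int((raw > thresh).sum())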
def plot_read_thresh(read_counts,fig=None,ax=None,frac_low=0.3, frac_high=0.925, cutoff=100, try_iter=10, window = 20):
read_threshold = get_read_threshold(read_counts,frac_low=frac_low, frac_high=frac_high, cutoff=cutoff, try_iter=try_iter, window = window)
threshold = len(read_counts[read_counts>read_threshold])
median_umis = read_counts.sort_values(ascending=False)[:threshold].median()
if ax is None:
fig = plt.figure(figsize=(4,4))
ax = fig.add_subplot(111)
ax.plot(range(len(read_counts)),
(read_counts.sort_values(ascending=False)).values,
color='lightgray',
linewidth=2)
ax.plot(range(threshold),
(read_counts.sort_values(ascending=False)).values[:threshold],
color='g',
linewidth=0,marker='.')
ax.set_xscale('log')
ax.set_yscale('log')
_ = ax.set_xlabel('# Barcodes (logscale)')
_ = ax.set_ylabel('# UMIs (logscale)')
ax.text(1,10,' n_cells: %d\n read cutoff: %d\n median_umis: %d' %(threshold,read_threshold,median_umis))
#print(read_threshold)
if fig is None:
return read_threshold
else:
return fig,ax,read_threshold
def generate_single_dge_report(output_dir,genome_dir, datatype, sample_name='', overwrite=True, analysis_dir=None,frac_low=0.3, frac_high=0.925, cutoff=100, try_iter=10,window = 20):
# Load gene_info dictionary to assign genes to reads
with open(genome_dir +'/gene_info.pkl', 'rb') as f:
gene_info = pickle.load(f)
gene_dict = gene_info['gene_bins']
exon_gene_start_end_dict = gene_info['genes_to_exons']
start_dict = gene_info['gene_starts']
end_dict = gene_info['gene_ends']
gene_id_to_name = gene_info['gene_id_to_name']
gene_id_to_genome = gene_info['gene_id_to_genome']
    for sp in ('mm', 'hg', 'dm'):
        gene_id_to_name['Intergenic_' + sp] = 'Intergenic'
        gene_id_to_name['Ambiguous_' + sp] = 'Ambiguous'
        gene_id_to_genome['Intergenic_' + sp] = sp
        gene_id_to_genome['Ambiguous_' + sp] = sp
# Load the read_assignment file
df = pd.read_csv(output_dir + '/molecule_info/' + sample_name + '_read_assignment.csv')
if datatype=='RNA':
df=df[(df['gene_name']!='Intergenic')&(df['gene_name']!='Ambiguous')]
df=df.reset_index()
#total_reads = df.shape[0]
read_counts = df.groupby('cell_barcode').size().sort_values(ascending=False)
fig,ax,read_thresh = plot_read_thresh(read_counts,frac_low=frac_low, frac_high=frac_high, cutoff=cutoff, try_iter=try_iter, window = window) ###########################
#print(read_thresh)
digital_count_matrix,all_genes,barcodes = generate_dge_matrix(df,read_cutoff=10)
# gene_df and cell_df
gene_df = pd.DataFrame()
gene_df['gene_id'] = all_genes
gene_df['gene_name'] = all_genes.apply(lambda s:gene_id_to_name[s])
gene_df['genome'] = all_genes.apply(lambda s:gene_id_to_genome[s])
species = df.genome.unique()
species_genes = {}
species_gene_inds = {}
species_umi_counts = {}
species_gene_counts = {}
for s in species:
species_genes[s] = all_genes[all_genes.apply(lambda s:gene_id_to_genome[s])==s]
species_gene_inds[s] = np.where(all_genes.apply(lambda s:gene_id_to_genome[s])==s)[0]
species_umi_counts[s] = pd.Series(index=barcodes,
data=np.array(digital_count_matrix[:,species_gene_inds[s]].sum(1)).flatten())
species_gene_counts[s] = pd.Series(index=barcodes,
data=np.array((digital_count_matrix[:,species_gene_inds[s]]>0).sum(1)).flatten())
species_umi_counts = pd.DataFrame(species_umi_counts)
species_gene_counts = pd.DataFrame(species_gene_counts)
species_assignments = pd.Series(['multiplet' for i in range(len(barcodes))])
for s in species:
species_assignments.loc[np.where((species_umi_counts[s]/species_umi_counts.sum(1))>0.9)] = s
cell_df = pd.DataFrame()
cell_df['cell_barcode'] = pd.Series(barcodes)
cell_df['species'] = species_assignments.values
cell_df['umi_count'] = np.array(digital_count_matrix.sum(1)).flatten()
#cell_df['umi_count_50dup'] = cell_df['umi_count'] * 0.5/(1-df.shape[0]/df.counts.sum())
cell_df['gene_count'] = np.array((digital_count_matrix>0).sum(1)).flatten()
# Write unfiltered matrix data
if not os.path.exists(output_dir + '/DGE_unfiltered/'):
os.makedirs(output_dir + '/DGE_unfiltered')
gene_df.to_csv(output_dir + '/DGE_unfiltered/'+ sample_name+ '_genes.csv')
cell_df.to_csv(output_dir + '/DGE_unfiltered/'+ sample_name+ '_cell_metadata.csv',index=False)
#sio.mmwrite(output_dir + '/DGE_unfiltered/'+ sample_name+ '_DGE.mtx', digital_count_matrix)
if not os.path.exists(output_dir + '/DGE_unfiltered/'+ sample_name):
os.makedirs(output_dir + '/DGE_unfiltered/'+ sample_name)
np.savetxt(output_dir + '/DGE_unfiltered/'+ sample_name+ '/genes.txt', gene_df['gene_id'].values,fmt="%s")
np.savetxt(output_dir + '/DGE_unfiltered/'+ sample_name+ '/barcodes.txt', cell_df['cell_barcode'].values,fmt="%s")
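    # count.mtx is written transposed (genes x cells) so its rows line up with
    # genes.txt and its columns with barcodes.txt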
sio.mmwrite(output_dir + '/DGE_unfiltered/'+ sample_name+ '/count.mtx', digital_count_matrix.T)
# Filter based on automatic cutoff
valid_cells = np.where(np.array(digital_count_matrix.sum(1)).flatten()>read_thresh)[0]
digital_count_matrix = digital_count_matrix[valid_cells]
barcodes = barcodes[valid_cells]
cell_df = cell_df.iloc[valid_cells]
#digital_count_matrix.sum()/df.shape[0]
# Write filtered matrix data
if not os.path.exists(output_dir + '/DGE_filtered'):
os.makedirs(output_dir + '/DGE_filtered')
gene_df.to_csv(output_dir + '/DGE_filtered/' + sample_name+ '_genes.csv')
cell_df.to_csv(output_dir + '/DGE_filtered/' + sample_name+ '_cell_metadata.csv',index=False)
#sio.mmwrite(output_dir + '/DGE_filtered/' + sample_name+ '_DGE.mtx',digital_count_matrix)
if not os.path.exists(output_dir + '/DGE_filtered/'+ sample_name):
os.makedirs(output_dir + '/DGE_filtered/'+ sample_name)
np.savetxt(output_dir + '/DGE_filtered/'+ sample_name+ '/genes.txt', gene_df['gene_id'].values,fmt="%s")
np.savetxt(output_dir + '/DGE_filtered/'+ sample_name+ '/barcodes.txt', cell_df['cell_barcode'].values,fmt="%s")
sio.mmwrite(output_dir + '/DGE_filtered/'+ sample_name+ '/count.mtx', digital_count_matrix.T)
digital_count_matrix,all_genes,barcodes = generate_dge_matrix(df,read_cutoff=read_thresh) ########################
species_genes = {}
species_gene_inds = {}
species_umi_counts = {}
species_gene_counts = {}
for s in species:
species_genes[s] = all_genes[all_genes.apply(lambda s:gene_id_to_genome[s])==s]
species_gene_inds[s] = np.where(all_genes.apply(lambda s:gene_id_to_genome[s])==s)[0]
species_umi_counts[s] = pd.Series(index=barcodes,
data=np.array(digital_count_matrix[:,species_gene_inds[s]].sum(1)).flatten())
species_gene_counts[s] = pd.Series(index=barcodes,
data=np.array((digital_count_matrix[:,species_gene_inds[s]]>0).sum(1)).flatten())
species_umi_counts = pd.DataFrame(species_umi_counts)
species_gene_counts = pd.DataFrame(species_gene_counts)
species_assignments = pd.Series(['multiplet' for i in range(len(barcodes))])
for s in species:
species_assignments.loc[np.where((species_umi_counts[s]/species_umi_counts.sum(1))>0.9)] = s
species = np.unique(species_assignments.values)
species = species[species!='multiplet']
sublibraries=None
if sublibraries is None:
# Calculate rRNA Percentage:
kmer_len = 30
rrna_sense_kmer_dict = {}
rrna_antisense_kmer_dict = {}
with open('Data/rRNA2.fa') as f:
while True:
line = f.readline()[:-1]
if len(line)==0:
break
if line[0]!='>':
for i in range(len(line)-kmer_len):
kmer = line[i:i+kmer_len]
rrna_sense_kmer_dict[kmer] = 0
line = reverse_complement(line)
for i in range(len(line)-kmer_len):
kmer = line[i:i+kmer_len]
rrna_antisense_kmer_dict[kmer] = 0
kmer_len = 30
mt_rrna_sense_kmer_dict = {}
mt_rrna_antisense_kmer_dict = {}
with open('Data/mt_rRNA2.fa') as f:
while True:
line = f.readline()[:-1]
if len(line)==0:
break
if line[0]!='>':
for i in range(len(line)-kmer_len):
kmer = line[i:i+kmer_len]
mt_rrna_sense_kmer_dict[kmer] = 0
line = reverse_complement(line)
for i in range(len(line)-kmer_len):
kmer = line[i:i+kmer_len]
mt_rrna_antisense_kmer_dict[kmer] = 0
def search_kmers(seq,kmer_dict):
found = False
for i in range(0,41,10):
try:
kmer_dict[seq[i:i+kmer_len]]
found = True
except:
pass
return found
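        # Note: search_kmers() only probes five fixed offsets (0, 10, ..., 40) of
        # each read against the 30-mer dictionaries built above, trading a little
        # sensitivity for speed when estimating the rRNA fraction.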
fastqfile = output_dir + '/split_sample/'+sample_name+'_barcode_R2.fq'
if os.path.isfile(fastqfile):
well_counts = {'total_counts':0,
'rRNA_sense_counts':0,
'rRNA_antisense_counts':0,
'mt_rRNA_sense_counts':0,
'mt_rRNA_antisense_counts':0}
read_lengths = Counter()
with open(fastqfile) as f:
for i in range(1000000):
header = f.readline()
if len(header)==0:
break
seq = f.readline()[:-1]
f.readline()
f.readline()
well_counts['total_counts'] += 1
read_lengths[len(seq)]+=1
if search_kmers(seq,rrna_sense_kmer_dict):
well_counts['rRNA_sense_counts'] += 1
elif search_kmers(seq,rrna_antisense_kmer_dict):
well_counts['rRNA_antisense_counts'] += 1
if search_kmers(seq,mt_rrna_sense_kmer_dict):
well_counts['mt_rRNA_sense_counts']+= 1
elif search_kmers(seq,mt_rrna_antisense_kmer_dict):
well_counts['mt_rRNA_antisense_counts'] += 1
read_len = max(read_lengths.keys())
rrna_fraction = (well_counts['rRNA_sense_counts']+well_counts['rRNA_antisense_counts'])/well_counts['total_counts']
mt_rrna_fraction = (well_counts['mt_rRNA_sense_counts']+well_counts['mt_rRNA_antisense_counts'])/well_counts['total_counts']
stat_dict = {}
with open(output_dir +'/split_sample/'+ sample_name+'_pipeline_stats.txt') as f:
for line in f:
k,v = line.strip().split('\t')
stat_dict[k] = int(v)
with open(output_dir +'/molecule_info/'+ sample_name+'_pipeline_stats.txt') as f:
for line in f:
k,v = line.strip().split('\t')
stat_dict[k] = int(v)
stat_dict['Estimated Number of Cells'] = len(barcodes)
stat_dict['Number of Reads'] = stat_dict['fastq_reads'] #contains all reads, including filtered cells and intergenic reads
stat_dict['Mean Reads/Cell'] = stat_dict['fastq_reads']/len(barcodes)
stat_dict['Sequencing Saturation'] = 1-df.shape[0]/df.counts.sum()
stat_dict['Reads Mapped to rRNA'] = rrna_fraction
stat_dict['Reads Mapped to mt-rRNA'] = mt_rrna_fraction
stat_dict['Reads Mapped to genome'] = stat_dict['total_read_count']/stat_dict['fastq_reads']
stat_dict['Reads Mapped to gene'] = stat_dict['mapped_to_gene']/stat_dict['total_read_count']
for s in species:
stat_dict['%s Fraction Reads in Cells' %s] = digital_count_matrix[:,species_gene_inds[s]].sum()/\
df.query('genome=="%s"' %s, engine='python').shape[0]
stat_dict['%s Median UMIs/Cell' %s] = np.median(species_umi_counts[s].iloc[np.where(species_assignments==s)])
#stat_dict['%s Median UMIs/Cell @50%% Dup' %s] = stat_dict['%s Median UMIs/Cell' %s] * 0.5 /stat_dict['Sequencing Saturation']
stat_dict['%s Median Genes/Cell' %s] = np.median(species_gene_counts[s].iloc[np.where(species_assignments==s)])
stat_dict['%s Number of Cells Detected' %s] = sum(species_assignments==s)
stat_dict['%s Exonic Fraction' %s] = df.loc[np.where(df.cell_barcode.isin(barcodes).values)].query('genome=="%s"' %s).exonic.mean()
stat_dict['Fraction Reads in Cells'] = digital_count_matrix.sum()/df.shape[0] # umis
stat_catagories = ['Estimated Number of Cells']
for s in species:
stat_catagories.append('%s Number of Cells Detected' %s)
for s in species:
stat_catagories.append('%s Median UMIs/Cell' %s)
#for s in species:
# stat_catagories.append('%s Median UMIs/Cell @50%% Dup' %s)
for s in species:
stat_catagories.append('%s Median Genes/Cell' %s)
stat_catagories += ['Mean Reads/Cell',
'Number of Reads',
'Sequencing Saturation',
'Reads Mapped to rRNA',
'Reads Mapped to mt-rRNA',
'Reads Mapped to genome',
'Reads Mapped to gene',
'Fraction Reads in Cells']
for s in species:
stat_catagories.append('%s Fraction Reads in Cells' %s)
for s in species:
stat_catagories.append('%s Exonic Fraction' %s)
# Save summary stats to csv
if overwrite:
analysis_dir=output_dir+'/analysis'
else:
analysis_dir=output_dir+'/'+analysis_dir
if not os.path.exists(analysis_dir):
os.makedirs(analysis_dir)
        pd.Series(stat_dict)
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on the numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assert(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
    self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
    self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import calendar
import seaborn as sns
sns.set(style='white', palette='deep')
plt.style.use('grayscale')
warnings.filterwarnings('ignore')
width = 0.35
# Functions
def autolabel(rects, ax, df):  # annotate each bar with its count and its share of all rows
for rect in rects:
height = rect.get_height()
ax.annotate('{} ({:.2f}%)'.format(height, height*100/df.shape[0]),
xy = (rect.get_x() + rect.get_width()/2, height),
xytext= (0,3),
textcoords="offset points",
ha='center', va='bottom', fontsize=15)
def autolabel_without_pct(rects, ax):  # annotate each bar with its raw count only
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy = (rect.get_x() + rect.get_width()/2, height),
xytext= (0,3),
textcoords="offset points",
ha='center', va='bottom', fontsize=15)
def autolabel_horizontal(rects,ax):
"""
Attach a text label beside each horizontal bar displaying its width
"""
for rect in rects:
width = rect.get_width()
ax.text(rect.get_x() + rect.get_width()+3, rect.get_y() + rect.get_height()/2.,
'%.2f' % width,
ha='center', va='center', color='black', fontsize=15)
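# Illustrative usage sketch (added for clarity, not part of the original
# script): how the autolabel helpers above are meant to be combined with a
# matplotlib bar chart. The DataFrame and column names below are made up for
# demonstration only.
def example_autolabel_usage():
    example_df = pd.DataFrame({'result': ['Gain', 'Loss', 'Gain', 'Gain']})
    counts = example_df['result'].value_counts()
    fig, ax = plt.subplots()
    rects = ax.bar(counts.index.astype(str), counts.values, width)
    # Writes "count (share of all rows)" above each bar
    autolabel(rects, ax, example_df)
    ax.set_ylabel('Trades')
    return fig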
# Importing the data file
df = pd.read_excel('Banco de Dados - WDO.xlsx')
"""
Market Data Presenter.
This module contains implementations of the DataPresenter abstract class, which
is responsible for presenting data in the form of mxnet tensors. Each
implementation presents a different subset of the available data, allowing
different models to make use of similar data.
"""
from typing import Dict, List, Optional, Tuple
from abc import abstractmethod
import pandas as pd
import numpy as np
from mxnet import ndarray as nd
from . import providers, utils
class DataPresenter:
"""
Abstract class defining the DataProvider API.
"""
@abstractmethod
def get_training_batch(self, size: int):
"""
Returns a batch of training data, partitioned from the validation data,
of size +size+.
"""
@abstractmethod
def get_validation_batch(self, size: int):
"""
Returns a batch of validation data, partitioned from the training data,
of size +size+.
"""
@abstractmethod
def data_array(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in mxnet form
"""
@abstractmethod
def data_frame(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in pandas form.
"""
@abstractmethod
def data_features(self) -> List[str]:
"""
Returns a list of data features in the same order as presented in the
frames.
"""
class IntradayPresenter:
"""
Loads data consisting only of intraday information, guaranteed to keep all
within market hours.
"""
# All it does is load data - no other calls necessary
# pylint: disable=too-few-public-methods
def __init__(self, provider: providers.DataProvider, *, window: int = 45,
valid_seed: int = 0, lookahead: int = 10,
normalize: bool = True, features: Dict[str, bool] = {},
**kwargs):
"""
Init function. Takes a +provider+ from which it extracts data and
a variety of other arguments. See info files for examples.
"""
# pylint: disable=too-many-instance-attributes
# Store basic setup parameters
self.provider = provider
self._window = window
self._valid_seed = valid_seed
self._lookahead = lookahead
self._normalize = normalize
self._features = [feat for feat in features if features[feat]]
self._outputs = []
# Collect and decide features
for feature in self._features:
# First handle special features
if feature == 'macd':
self._outputs.append('macd_signal')
if feature == 'vortex':
self._outputs.extend(['vortex+', 'vortex-'])
continue
if feature == 'stochastic':
self._outputs.extend(['%K', '%D'])
continue
if feature == 'williams':
self._outputs.append('%R')
continue
if feature == 'dysart':
self._outputs.extend(['pvi', 'nvi'])
continue
if feature == 'bollinger':
self._outputs.extend(['bollinger+', 'bollinger=', 'bollinger-'])
continue
# Then add all others
self._outputs.append(feature)
# Decide range of possible dates in advance
self._first = provider.first()
# TODO don't limit this anymore
self._latest = provider.latest() - pd.to_timedelta(2, unit='day')
# Cache for already processed data to cut down on disk usage
self._train_cache = {}
self._val_cache = {}
# Cache of holidays to prevent continuously recalculating them
self._holidays = utils.trading_holidays(self._first - pd.to_timedelta(1, unit='day'),
self._latest)
self._half_days = utils.trading_half_days(self._first - pd.to_timedelta(1, unit='day'),
self._latest)
def get_training_batch(self, size: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a batch of training data, partitioned from the validation data,
of size +size+.
"""
return self._get_batch(size, validation=False)
def get_validation_batch(self, size: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a batch of validation data, partitioned from the training data,
of size +size+.
"""
return self._get_batch(size, validation=True)
def data_array(self, timestamp: pd.Timestamp) -> nd.NDArray:
"""
Returns the data associated with a single +timestamp+ in mxnet form
"""
start_time = timestamp - pd.to_timedelta(self._window, unit='min')
return self._get_data(start_time, False)[0]
@abstractmethod
def data_frame(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in pandas form.
"""
data = self._extract_daily_data(timestamp)
if data is None:
return None
return data.loc[timestamp, :]
def _get_data(self, time: pd.Timestamp, validation: bool) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a simgle data sample starting at a given +time+. Uses
+validation+ to distinguish between training and validation sets.
NOTE: This function assumes that the entire data window is available.
If a time provided is too late to obtain a full window, behavior
is UNPREDICTABLE.
"""
# Check if the sample has already been cached.
day = time.floor('D')
start_index = (time.hour - 9) * 60 + (time.minute - 30)
end_index = start_index + self._window
if validation and day in self._val_cache:
data, target = self._val_cache[day]
return data[start_index: end_index], target[start_index: end_index]
if not validation and day in self._train_cache:
data, target = self._train_cache[day]
return data[start_index: end_index], target[start_index: end_index]
# Otherwise generate, cache, and return it
data, target = self._to_daily_input_data(day)
if validation:
self._val_cache[day] = (data, target)
else:
self._train_cache[day] = (data, target)
return data[start_index: end_index], target[start_index: end_index]
def _to_daily_input_data(self, date: pd.Timestamp) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Transforms a set of intraday data for a +date+ to an array appropriate
for input to the model, and a target set of predictions against which
to compare outputs.
"""
# Gather data requested data components. Note that this seemingly
# over-complicated method guarantees that they remain in the order
# prescribed by the feature list.
datas = []
for feat in self._outputs:
if feat == "high":
datas.append(_to_intraday_high(date, self.provider,
normalize=self._normalize))
elif feat == "low":
datas.append(_to_intraday_low(date, self.provider,
normalize=self._normalize))
elif feat == "change":
datas.append(_to_intraday_change(date, self.provider,
normalize=self._normalize))
elif feat == "open":
datas.append(_to_intraday_open(date, self.provider,
normalize=self._normalize))
elif feat == "volume":
datas.append(_to_intraday_volume(date, self.provider,
normalize=self._normalize))
elif feat == "time":
datas.append(_to_intraday_time(date, self.provider,
normalize=self._normalize))
elif feat == "macd":
# For MACD, include both MACD and its signal
macd, macd_signal = _to_intraday_macd(date, self.provider,
normalize=self._normalize)
datas.extend([macd_signal, macd])
elif feat == "mass_index":
datas.append(_to_intraday_mass_index(date, self.provider))
elif feat == "trix15":
datas.append(_to_intraday_trix(date, self.provider, 15))
elif feat == "vortex+":
vortex_up, vortex_down = _to_intraday_vortex(date,
self.provider, 25)
datas.extend([vortex_up, vortex_down])
elif feat == "%K":
pK, pD = _to_intraday_stochastic(date, self.provider, 30)
datas.extend([pK, pD])
elif feat == "rsi":
datas.append(_to_intraday_rsi(date, self.provider, 14))
elif feat == "%R":
# The Williams %R is mathematically equivalent to (1 - %K). It
# is duplicated here to obtain a shorter period.
pK, _ = _to_intraday_stochastic(date, self.provider, 10)
datas.append(pK - 1)
elif feat == "accdist":
datas.append(_to_intraday_accdist(date, self.provider))
elif feat == "mfi":
datas.append(_to_intraday_mfi(date, self.provider, 30))
elif feat == "vpt":
datas.append(_to_intraday_vpt(date, self.provider))
elif feat == "obv":
datas.append(_to_intraday_obv(date, self.provider))
elif feat == "pvi":
pvi, nvi = _to_intraday_dysart(date, self.provider)
datas.extend([pvi, nvi])
elif feat == "bollinger+":
b_top, b_mid, b_bottom = _to_intraday_bollinger(date,
self.provider,
30, 2)
datas.extend([b_top, b_mid, b_bottom])
elif feat == "ultimate":
datas.append(_to_intraday_ultimate(date, self.provider))
elif feat == "cci":
datas.append(_to_intraday_cci(date, self.provider))
elif feat == "target":
datas.append(_to_intraday_target(date, self.provider,
self._lookahead,
normalize=self._normalize))
# Gather target data and return data/target arrays
target = _to_intraday_target(date, self.provider, self._lookahead,
normalize=self._normalize)
return nd.stack(*datas, axis=1), target.reshape(-1, 1)
def _extract_daily_data(self, date: pd.Timestamp) -> Optional[pd.DataFrame]:
"""
Gets the market data for a given day, restricted to market hours.
"""
data = self.provider.intraday(date)
if data is None or data.empty:
return None
return data
def _get_batch(self, batch_size: int, validation: bool = False) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Gets a random batch of data of size +batch_size+. Returns a tuple of
data and target predictions. If +validation+ is set, prevents these
dates from being drawn for non-validation batches.
"""
# Define a Callable for testing appropriate dates
def _is_suitable_time(time: pd.Timestamp) -> bool:
"""
Returns whether the market is open at a given +time+ for the
required window.
"""
# First, confirm that this date matches the right type
day = time.floor(freq='D')
is_validation_date = (day.dayofyear % 10 == self._valid_seed)
if validation != is_validation_date:
return False
# Ensure it's on weekdays and during market hours. Note that we
# discard the last 10 minutes of trading because they are both
# dangerous for day trading and provide no good way to train the
# 10 minute output for the model.
if time.weekday() > 4:
return False
if (time.hour * 60 + time.minute) < 9 * 60 + 30:
return False
if (time.hour * 60 + time.minute + self._window) > 15 * 60 - self._lookahead:
return False
# Check against holidays. Note that for the sake of sanity, we
# don't include half days.
if day in self._holidays or day in self._half_days:
return False
return True
# Next, generate arrays of random dates within the last two years,
# recording appropriate ones to form an array of size +batch_size+
timestamps = pd.Series()
while True:
random_times = pd.to_datetime(np.random.randint(low=self._first.value,
high=self._latest.value,
size=(100),
dtype='int64')).to_series()
suitable_mask = random_times.apply(_is_suitable_time)
timestamps = pd.concat([timestamps, random_times.loc[suitable_mask]])
if len(timestamps) >= batch_size:
timestamps = timestamps[0 : batch_size]
break
index_array = pd.to_datetime(timestamps)
# Next, gather all data into batches with axes (batch, window, data...)
datas, targets = [], []
for timestamp in index_array:
data, target = self._get_data(timestamp, validation)
datas.append(data)
targets.append(target)
data_array, target_array = nd.stack(*datas), nd.stack(*targets)
# Return the data
return data_array, target_array
def data_features(self) -> List[str]:
"""
Returns a list of data features in the same order as presented in the
frames.
"""
return self._outputs
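# Illustrative wiring sketch (not part of the original module): roughly how an
# IntradayPresenter is constructed and queried. `make_provider` is a
# placeholder callable -- a concrete providers.DataProvider implementation is
# required, and its constructor arguments are not shown here.
def _example_presenter_usage(make_provider):
    presenter = IntradayPresenter(make_provider(), window=45, lookahead=10,
                                  normalize=True,
                                  features={'open': True, 'volume': True,
                                            'macd': True})
    data, target = presenter.get_training_batch(32)
    # data is roughly (batch, window, n_features); target is (batch, window, 1)
    return data, target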
def _get_intraday_data(date: pd.Timestamp, provider: providers.DataProvider) \
-> pd.DataFrame:
"""
Gets the intraday dataframe limited to market hours for a given +date+
and +provider+.
"""
# First, get data and limit it to market hours
data = provider.intraday(date)
if data is None or data.empty:
raise RuntimeError(f"Something went wrong - empty data array for {date}!")
start = data.index[0].replace(hour=9, minute=30)
end = data.index[0].replace(hour=16, minute=0)
# Next, resample the data by the minute and interpolate missing values
data = data.loc[data.index.isin(pd.date_range(start=start, end=end, freq='min'))]
data = data.resample('min')
data = data.interpolate(method='time').copy()
return data
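# Minimal sketch (added for clarity, not in the original module) of the
# resample-and-interpolate step used in _get_intraday_data above: gaps in the
# per-minute index are filled by time-weighted interpolation. Dates and prices
# below are arbitrary example values.
def _example_minute_interpolation():
    index = pd.to_datetime(['2020-01-02 09:30', '2020-01-02 09:32',
                            '2020-01-02 09:35'])
    prices = pd.DataFrame({'close': [100.0, 101.0, 104.0]}, index=index)
    filled = prices.resample('min').interpolate(method='time')
    return filled  # one row per minute from 09:30 to 09:35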
def _to_intraday_high(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute high of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
open price.
"""
data = _get_intraday_data(date, provider)
high = ((data.high - data.open) / data.open) if normalize else data.high
return nd.array(high.values, utils.try_gpu(0))
def _to_intraday_low(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute low of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
open price.
"""
data = _get_intraday_data(date, provider)
low = ((data.low - data.open) / data.open) if normalize else data.low
return nd.array(low.values, utils.try_gpu(0))
def _to_intraday_change(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute close of a data series for
a given +date+ and +provider+. If +normalize+, the percentage change from
the previous close is returned instead.
"""
data = _get_intraday_data(date, provider)
close_prev = data.close.shift(periods=1, fill_value=data.close[0])
close = ((data.close - close_prev) / close_prev) if normalize else data.close
return nd.array(close.values, utils.try_gpu(0))
def _to_intraday_open(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute open of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
daily open price.
"""
data = _get_intraday_data(date, provider)
open = (data.open / data.open.iloc[0]) if normalize else data.open
return nd.array(open.values, utils.try_gpu(0))
def _to_intraday_volume(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute volume of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
average volume.
"""
data = _get_intraday_data(date, provider)
vol = data.volume / data.volume.mean() if normalize else data.volume
return nd.array(vol.values, utils.try_gpu(0))
def _to_intraday_time(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the trading minute of a data series for
a given +date+ and +provider+. If +normalize+, it is normalized so that
9:30 is 0 and 16:00 is 1
"""
data = _get_intraday_data(date, provider)
minute = data.index.hour * 60 + data.index.minute - (9 * 60 + 30)
tempus = (minute / (60 * 7 + 30)) if normalize else minute
return nd.array(tempus.values, utils.try_gpu(0))
def _to_intraday_macd(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a pair of ndarrays consisting of the per-minute MACD of a data
series for a given +date+ and +provider+, and a signal for the same. If
+normalize+, both are divided by the daily open price.
"""
# First, calculate the MACD via exponential moving averages
data = _get_intraday_data(date, provider)
ewm12 = pd.Series.ewm(data['close'], span=12).mean()
ewm26 = pd.Series.ewm(data['close'], span=26).mean()
macd = ewm26 - ewm12
# Next, calculate the signal line
signal = pd.Series.ewm(macd, span=9).mean()
# Return both
return nd.array(macd.values, utils.try_gpu(0)), \
nd.array(signal.values, utils.try_gpu(0))
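# Standalone sketch (added for clarity, not in the original module): the MACD
# computation above reduces to two exponential moving averages plus a smoothed
# signal line; shown here on a synthetic close-price series.
def _example_macd_sketch():
    close = pd.Series(np.linspace(100.0, 110.0, 120))
    ewm12 = close.ewm(span=12).mean()
    ewm26 = close.ewm(span=26).mean()
    macd = ewm26 - ewm12                 # same orientation as _to_intraday_macd
    signal = macd.ewm(span=9).mean()
    return macd, signal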
def _to_intraday_trix(date: pd.Timestamp, provider: providers.DataProvider,
period: int) -> nd.NDArray:
"""
Returns an ndarray containing the TRIX for a given +date+ and +provider+,
averaged across a given +period+.
"""
# First, get the triple-smoothed 15 period exponential moving average
data = _get_intraday_data(date, provider)
ewm1 = pd.Series.ewm(data['close'], span=period).mean()
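    # The remaining steps of this function are missing in this copy; the lines
    # below are a hedged completion assuming the standard TRIX definition
    # (1-period rate of change of a triple-smoothed EMA), mirroring the
    # module's existing style.
    ewm2 = pd.Series.ewm(ewm1, span=period).mean()
    ewm3 = pd.Series.ewm(ewm2, span=period).mean()
    trix = ewm3.pct_change().fillna(0)
    return nd.array(trix.values, utils.try_gpu(0))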
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.index import Index, Int64Index
from pandas.tseries.frequencies import infer_freq, to_offset
from pandas.tseries.offsets import DateOffset, generate_range, Tick
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if isinstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if isinstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if isinstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, datetime):
func = getattr(self, opname)
result = func(_to_m8(other))
elif isinstance(other, np.ndarray):
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = getattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if isinstance(other, timedelta):
func = getattr(self, opname)
return func(np.timedelta64(other))
else:
func = getattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeSeriesError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_groupby = lib.groupby_arrays # _wrap_i8_function(lib.groupby_int64)
_arrmap = _wrap_dt_function(_algos.arrmap_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not isinstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if isinstance(freq, basestring):
freq = to_offset(freq)
else:
if isinstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, datetime):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.to_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if isinstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', copy=copy)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', copy=copy)
else:
subarr = tools.to_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_get_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and len(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not isinstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not isinstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_get_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', copy=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(isinstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(isinstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.get_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_to_pydatetime(self.asi8)
def __repr__(self):
from pandas.core.format import _format_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
summary = str(self.__class__)
if len(self) > 0:
first = _format_datetime64(values[0], tz=self.tz)
last = _format_datetime64(values[-1], tz=self.tz)
summary += '\n[%s, ..., %s]' % (first, last)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
summary += tagline % (len(self), freq, self.tz)
return summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if len(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif len(state) == 3:
# legacy format: daterange
offset = state[1]
if len(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if isinstance(other, Index):
return self.union(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shift(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if isinstance(other, Index):
return self.diff(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shift(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.astype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def summary(self, name=None):
if len(self) > 0:
index_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.astype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from pandas.tseries.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
snapped = np.empty(len(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
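# Example (illustrative): DatetimeIndex(['2012-01-01 09:00:01']).snap(freq='T')
# returns the stamp rolled back to 09:00:00, because that minute boundary is
# closer than 09:01:00.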
def shift(self, n, freq=None):
"""
Specialized shift which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shift by
freq : DateOffset or timedelta-like, optional
Returns
-------
shifted : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if isinstance(freq, basestring):
freq = to_offset(freq)
return Index.shift(self, n, freq)
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shift with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name)
def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
return DatetimeIndex(self.values.repeat(repeats),
name=self.name)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return DatetimeIndex(taken, tz=self.tz, name=self.name)
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combining
overlapping ranges with the same DateOffset, this will be much
faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, DatetimeIndex):
result.tz = self.tz
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if not isinstance(other, DatetimeIndex) and len(other) > 0:
try:
other = DatetimeIndex(other)
except ValueError:
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if isinstance(other, DatetimeIndex):
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
return DatetimeIndex(joined, name=name)
def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_start = right[0]
# Only need to "adjoin", not overlap
return (left_end + offset) >= right_start
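# Example (illustrative): with a daily offset, 2012-01-01..2012-01-05 and
# 2012-01-06..2012-01-10 can be fast-unioned because left_end + offset
# (2012-01-06) >= right_start -- the ranges only need to adjoin, not overlap.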
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = np.concatenate((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=max(left_end, right_end),
freq=left.offset)
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = getattr(obj, 'offset', None)
self.tz = getattr(obj, 'tz', None)
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
result = Index.intersection(self, other)  # API: pandas.core.index.Index.intersection
import asyncio
import sqlite3
import logging.config
from typing import List, Any
from datetime import datetime
from pathlib import Path
import aiohttp
import xmltodict
import yaml
import pandas as pd
from credentials.credentials import GOODREADS_KEY
# configuring logging
with open('log_config.yaml', 'r') as f:
log_config = yaml.safe_load(f.read())
logging.config.dictConfig(log_config)
logger = logging.getLogger(__name__)
async def check_goodreads(author: str):
'''Running requests within aiohttp session and writing results to database.'''
URL = 'https://www.goodreads.com/author/list.xml'
author_id = await get_author_id(author)
async with aiohttp.ClientSession() as session:
data = await get_books_data(session, URL, author_id)
df = pd.DataFrame(data, columns=['book_id', 'book_title', 'title_without_series', 'publication_year', 'publication_month'])  # API: pandas.DataFrame
import pandas as pd
import numpy as np
import pdb
class ArucoCorner:
"""
Object which holds corner data for a specific aruco tag id
"""
def __init__(self, id_num, corners, data_attributes=None, file_folder=None):
"""
Creates the object
"""
# TODO: add aruco dictionary and params to this, so pose_detector can use it later
self.id = id_num
self.name = data_attributes # TODO: a dictionary that contains the relevant name data -> since different projects will have different attributes for the data
self.folder_loc = file_folder # location of the data, if none, it will do it in the current location
self.corners = corners
self.data_len = len(corners)
#self.corners_df = self.get_corners_df(corners)
def reshape_corners(self):
return self.corners.reshape(self.data_len, 8)
def gen_corners_df(self):
"""
Return corner data as a dataframe. Columns: x1, y1, ..., x4, y4
"""
reshaped_c = self.reshape_corners()
return pd.DataFrame(reshaped_c, columns=["x1","y1","x2","y2","x3","y3","x4","y4"])  # API: pandas.DataFrame
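# Usage sketch (the id value and the corner-array shape below are assumptions
# made for illustration only):
#   corners = np.zeros((10, 4, 2))  # 10 frames of 4 (x, y) corner points
#   tag = ArucoCorner(id_num=3, corners=corners)
#   df = tag.gen_corners_df()  # columns x1, y1, ..., x4, y4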
#libraries
import numpy as np
import pandas as pd
from datetime import datetime as dt
import time
import datetime
import os
import warnings
warnings.filterwarnings("ignore")
import logging
logging.basicConfig(filename='log.txt',level=logging.DEBUG, format='%(asctime)s %(message)s')
pd.set_option('max_colwidth', 500)
pd.set_option('max_columns', 500)
pd.set_option('max_rows', 100)
dtypes = {
'MachineIdentifier': 'category',
'ProductName': 'category',
'EngineVersion': 'category',
'AppVersion': 'category',
'AvSigVersion': 'category',
'IsBeta': 'int8',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'DefaultBrowsersIdentifier': 'float16',
'AVProductStatesIdentifier': 'float32',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'HasTpm': 'int8',
'CountryIdentifier': 'int16',
'CityIdentifier': 'float32',
'OrganizationIdentifier': 'float16',
'GeoNameIdentifier': 'float16',
'LocaleEnglishNameIdentifier': 'int8',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsBuild': 'int16',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'OsBuildLab': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'AutoSampleOptIn': 'int8',
'PuaMode': 'category',
'SMode': 'float16',
'IeVerIdentifier': 'float16',
'SmartScreen': 'category',
'Firewall': 'float16',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_OEMNameIdentifier': 'float16',
'Census_OEMModelIdentifier': 'float32',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_ProcessorModelIdentifier': 'float16',
'Census_ProcessorClass': 'category',
'Census_PrimaryDiskTotalCapacity': 'float32',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_HasOpticalDiskDrive': 'int8',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_InternalBatteryType': 'category',
'Census_InternalBatteryNumberOfCharges': 'float32',
'Census_OSVersion': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSBuildNumber': 'int16',
'Census_OSBuildRevision': 'int32',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSUILocaleIdentifier': 'int16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_IsPortableOperatingSystem': 'int8',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_IsFlightingInternal': 'float16',
'Census_IsFlightsDisabled': 'float16',
'Census_FlightRing': 'category',
'Census_ThresholdOptIn': 'float16',
'Census_FirmwareManufacturerIdentifier': 'float16',
'Census_FirmwareVersionIdentifier': 'float32',
'Census_IsSecureBootEnabled': 'int8',
'Census_IsWIMBootEnabled': 'float16',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsPenCapable': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16',
'HasDetections': 'int8'
}
train = pd.read_csv('train.csv', dtype=dtypes)
test = pd.read_csv('test.csv', dtype=dtypes)
train["dataset"] = "train"
test["dataset"] = "test"
train = train.drop('HasDetections', axis=1)
df_row_merged = pd.concat([train, test], ignore_index=True)  # API: pandas.concat
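# The explicit dtype map above (category/float16/int8 instead of the default
# object/float64/int64) is what keeps the combined train/test frame within RAM;
# a quick way to confirm the footprint (a sketch, not part of the original
# script) is:
#   df_row_merged.info(memory_usage='deep')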
import pandas as pd
from collections import defaultdict
import os
import requirements
import numpy as np
import xmlrpc.client as xc
client = xc.ServerProxy('https://pypi.python.org/pypi')
packages = client.list_packages()
datadict = defaultdict(list)
with open('requirements.txt', 'r') as infile:
new_package = True
for line in infile:
if line.strip() == '':
new_package = True
print(package_name)
if package_name not in datadict['package']:
datadict['package'].append(package_name)
datadict['requirement'].append(np.nan)
continue
if new_package:
# If this is the case, the current line gives the name of the package
package_name = os.path.basename(line).strip()
new_package = False
else:
# This line gives a requirement for the current package
try:
print(line)
for req in requirements.parse(line.strip()):
datadict['package'].append(package_name)
datadict['requirement'].append(req.name)
except ValueError:
pass
# Convert to dataframe
df = pd.DataFrame(data=datadict)  # API: pandas.DataFrame
# starpar.py
import numpy as np
import pandas as pd
from ..load_sim import LoadSim
from ..util.mass_to_lum import mass_to_lum
class StarPar():
@LoadSim.Decorators.check_pickle
def read_starpar_all(self, prefix='starpar_all',
savdir=None, force_override=False):
rr = dict()
for i in self.nums_starpar:
print(i, end=' ')
r = self.read_starpar(num=i, force_override=False)
if i == 0:
for k in r.keys():
rr[k] = []
for k in r.keys():
try:
rr[k].append(r[k].value.item())
except:
rr[k].append(r[k])
rr = pd.DataFrame(rr)  # API: pandas.DataFrame
# Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import argparse
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from pyrocov.sarscov2 import aa_mutation_to_position
# compute moran statistic
def moran(values, distances, lengthscale):
assert values.size(-1) == distances.size(-1)
weights = (distances.unsqueeze(-1) - distances.unsqueeze(-2)) / lengthscale
weights = torch.exp(-weights.pow(2.0))
weights *= 1.0 - torch.eye(weights.size(-1))
weights /= weights.sum(-1, keepdim=True)
output = torch.einsum("...ij,...i,...j->...", weights, values, values)
return output / values.pow(2.0).sum(-1)
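# The statistic computed above is a kernel-weighted Moran's I:
#   I = sum_{i,j} w_ij * x_i * x_j / sum_i x_i**2
# where w_ij is proportional to exp(-((d_i - d_j) / lengthscale)**2), w_ii = 0,
# and each row of w is normalised to sum to one; the x values are mean-centred
# by the caller (see permutation_test below).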
# compute moran statistic and do permutation test with given number of permutations
def permutation_test(values, distances, lengthscale, num_perm=999):
values = values - values.mean()
moran_given = moran(values, distances, lengthscale).item()
idx = [torch.randperm(distances.size(-1)) for _ in range(num_perm)]
idx = torch.stack(idx)
moran_perm = moran(values[idx], distances, lengthscale)
p_value = (moran_perm >= moran_given).sum().item() + 1
p_value /= float(num_perm + 1)
return moran_given, p_value
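# Usage sketch with synthetic inputs (illustrative only):
#   values = torch.randn(100)
#   distances = torch.arange(100, dtype=torch.float32)
#   stat, p = permutation_test(values, distances, lengthscale=10.0, num_perm=999)
# A small p suggests the values are spatially autocorrelated along the positions.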
def main(args):
# read in inferred mutations
df = pd.read_csv("paper/mutations.tsv", sep="\t", index_col=0)
df = df[["mutation", "Δ log R"]]
mutations = df.values[:, 0]
assert mutations.shape == (2904,)
coefficients = df.values[:, 1] if not args.magnitude else np.abs(df.values[:, 1])
gene_map = defaultdict(list)
distance_map = defaultdict(list)
results = []
# collect indices and nucleotide positions corresponding to each mutation
for i, m in enumerate(mutations):
gene = m.split(":")[0]
gene_map[gene].append(i)
distance_map[gene].append(aa_mutation_to_position(m))
# map over each gene
for gene, idx in gene_map.items():
values = torch.from_numpy(np.array(coefficients[idx], dtype=np.float32))
distances = distance_map[gene]
distances = torch.from_numpy(np.array(distances) - min(distances))
gene_size = distances.max().item()
lengthscale = min(gene_size / 20, 50.0)
_, p_value = permutation_test(values, distances, lengthscale, num_perm=999999)
s = "Gene: {} \t #Mut: {} Size: {} \t p-value: {:.6f} Lengthscale: {:.1f}"
print(s.format(gene, distances.size(0), gene_size, p_value, lengthscale))
results.append([distances.size(0), gene_size, p_value, lengthscale])
# compute moran statistic for entire genome for multiple lengthscales
for global_lengthscale in [100.0, 500.0]:
distances_ = [aa_mutation_to_position(m) for m in mutations]
distances = torch.from_numpy(
np.array(distances_, dtype=np.float32) - min(distances_)
)
values = torch.tensor(np.array(coefficients, dtype=np.float32)).float()
_, p_value = permutation_test(
values, distances, global_lengthscale, num_perm=999999
)
genome_size = distances.max().item()
s = "Entire Genome (#Mut = {}; Size = {}): \t p-value: {:.6f} Lengthscale: {:.1f}"
print(s.format(distances.size(0), genome_size, p_value, global_lengthscale))
results.append([distances.size(0), genome_size, p_value, global_lengthscale])
# save results as csv
results = np.stack(results)
columns = ["NumMutations", "GeneSize", "PValue", "Lengthscale"]
index = list(gene_map.keys()) + ["EntireGenome"] * 2
result = pd.DataFrame(data=results, index=index, columns=columns)  # API: pandas.DataFrame
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from sklearn.metrics import classification_report, confusion_matrix
df = pd.read_csv("data/iris.csv")
df.head()
inputs_x = df[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']]
inputs_y = df['variety']
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
inputs_x_scaler = scaler.fit_transform(inputs_x.values)
df_scaler = pd.DataFrame(inputs_x_scaler, index=inputs_x.index, columns=inputs_x.columns)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_scaler.values, inputs_y, test_size=0.2, random_state=42)
from sklearn import preprocessing
encoder = preprocessing.LabelEncoder()
y_train = encoder.fit_transform(y_train)
y_test = encoder.fit_transform(y_test)
y_train = keras.utils.to_categorical(y_train, num_classes=3)
y_test = keras.utils.to_categorical(y_test, num_classes=3)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
model = Sequential()
model.add(Dense(100, activation='relu', input_dim=4))
model.add(Dense(3, activation='softmax'))
optimizer = SGD(learning_rate=0.01, momentum=0.9)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
trained_model = model.fit(X_train, y_train, epochs=300, batch_size=32)
test_result = model.evaluate(X_test, y_test, verbose=0)
print("Test accuracy: {}".format(test_result[1]))
y_pred = model.predict(X_test)
df_result = pd.DataFrame.from_dict(trained_model.history)  # API: pandas.DataFrame.from_dict
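# A quick look at the training curves (sketch; the column names follow what
# Keras stores in History.history, typically 'loss' and 'accuracy'):
#   df_result[['loss', 'accuracy']].plot()
#   plt.show()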
import math
import requests
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sys
from datetime import datetime, timedelta
from glob import glob
plt.style.use('ggplot')
from mpl_toolkits.basemap import Basemap
from igrf12py.igrf12fun import runigrf12, plotigrf
base_url = 'http://www.ndbc.noaa.gov/view_text_file.php?filename=%s&dir=data/historical/stdmet/'
# these are buoys within the drifter region that were active in 2012/2013
buoy_list = {
46002:(42.614, -130.490),
46005:(45.958, -131.000),
46011:(34.956, -121.019),
46012:(37.363, -122.881),
46013:(38.242, -123.301),
46015:(42.764, -124.832),
46029:(46.159, -124.514),
46047:(32.403, -119.536),
46061:(47.353, -124.731),
46087:(48.494, -124.728),
46089:(45.893, -125.819),
46211:(46.858, -124.244),
46229:(43.767, -124.549),
46239:(36.342, -122.102),
46246:(49.904, -145.243),
46089:(45.893, -125.819),
'cdea2':(56.388, -134.637)}
def compute_distance(lat1, long1, lat2, long2):
# Convert latitude and longitude to
# spherical coordinates in radians.
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1)*degrees_to_radians
phi2 = (90.0 - lat2)*degrees_to_radians
# theta = longitude
theta1 = long1*degrees_to_radians
theta2 = long2*degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta', phi')
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
math.cos(phi1)*math.cos(phi2))
arc = math.acos( cos )
# multiply by radius of earth in km
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
return arc*6371
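# Worked example (approximate): compute_distance(46.0, -131.0, 46.0, -125.0)
# returns roughly 460 km for two points six degrees of longitude apart at
# 46 degrees north.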
def get_request(stationid, year, dirp=''):
fname = "%sh%s.txt.gz" %(stationid, year)
path = os.path.join(dirp, fname[:-3])
if not os.path.exists(path):
print("downloading from %s" %path)
rr = requests.get(base_url %(fname))
response = rr.text
if 'Unable to access' in response:
print("Unable to access data at %s" %rr.url)
return ''
fp = open(path, 'w')
fp.write(rr.text)
fp.close()
return path
def parse_request(path):
if not os.path.exists(path):
return ''
else:
ss = pd.read_csv(path,
delim_whitespace=True,
header=0,
skiprows=[1],
parse_dates={'buoy_datetime':['#YY', 'MM',
'DD', 'hh', 'mm']},
)
ss['buoy_datetime'] = pd.to_datetime(ss['buoy_datetime'],
format="%Y %m %d %H %M")
return ss
def get_all_buoys():
buoys = pd.DataFrame()
for buoyid in buoy_list.keys():
for yr in [2012, 2013]:
path = get_request(buoyid, yr, 'buoys')
bpd = parse_request(path)
if type(bpd) != str:
bpd['buoy_lat'] = buoy_list[buoyid][0]
bpd['buoy_lon'] = buoy_list[buoyid][1]
bpd['buoy_id'] = buoyid
buoys = buoys.append(bpd)
return buoys
def find_nearest_buoy(lat, lon):
buoy_dists = []
for buoy in buoy_list.keys():
blat, blon = buoy_list[buoy]
dd = compute_distance(blat, blon, lat, lon)
buoy_dists.append((buoy, dd))
buoys_sorted = sorted(buoy_dists, key=lambda x: x[1])
return buoys_sorted
#Convert julian day described in the data to datetime format
def convert_julian_frac(julian_frac, year):
"""
julian_frac is string in the form of a float
"""
frac, julian_day = math.modf(float(julian_frac)+1)
#The drifters reported both 0 and 356 for julian days in the calendar year
#When I get access to source code, I will try to determine which is correct
if int(julian_day) > 365:
julian_day = julian_day-365
year = int(year) + 1
mins, hrs = math.modf(frac*24.)
secs, mins = math.modf(mins*60)
usecs, secs = math.modf(secs*60)
dval= '%s %s %s %s %s' %(year, int(julian_day), int(hrs), int(mins), int(secs))
dtval = datetime.strptime(dval, '%Y %j %H %M %S')
return dtval
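# Worked example: convert_julian_frac('224.80556', '2011') is read as day 225 of
# 2011 (13 August) plus 0.80556 of a day, i.e. roughly
# datetime(2011, 8, 13, 19, 20, 0).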
def load_data(fname, drifter_type, launch_datetime='2012-06-01 00:00:00', end_datetime='2014-06-01 00:00:00'):
"""Input the name of the drifter file downloaded from the website. This function parses the two types of data,
averaged measurements, M, and calibration measurements, C
"""
min_bat = 7
dval = open(fname, 'r')
#initialize battery voltage
bvalue = -1
grf12 = -1
cd = {"id":[], "cal_start_datetime":[], "sample_datetime":[], "num":[],
"x":[], "y":[], "z":[], "f":[], "temp":[], "lat":[], "lon":[], "bat":[],
}
md = {"id":[], "sample_datetime":[], "num":[],
"x":[], "y":[], "z":[], "f":[], "temp":[], "lat":[], "lon":[], "bat":[],
}
nsams = 250
calsx = [0]*nsams
calsy = [0]*nsams
calsz = [0]*nsams
do_return = False
cl = {"id":[], "cal_start_datetime":[], "calsx":[], "calsy":[], "calsz":[],
"temp":[], "lat":[], "lon":[], "bat":[]}
for line in dval:
line_vals = line.split(' ')
line_vals = [x for x in line_vals if x!='']
line_vals[-1] = line_vals[-1].strip()
if line_vals[0] == 'S':
# S: status line
# S 000000000066760 123 270 2011.08.13 23:03:06 47.651360 -129.042221 8
# S drifter_id rec_# day date time latitude longitude ?
# remove the S character
# this message is sent when the data is uploaded to the server
#mstatus = line_vals[1:]
d = pd.to_datetime("%s %s" %(line_vals[4], line_vals[5]))
S_drifter_id = line_vals[1]
S_record_num = line_vals[2]
S_datetime = d
S_lat = float(line_vals[6])
S_lon = float(line_vals[7])
if line_vals[0] == 'M':
# M: measurement
# M 1 -62475.9 -32540.4 -10721.9 19.39 47.9019 -128.9508 1.6 2011 224.80556 17.49
# M meas_# xf yf zf temperature latitude longitude ? yr decimal_julian _day
# convert julian day to something that is more easily readable
mdt = convert_julian_frac(line_vals[10], line_vals[9])
# now we have the measurement value, datetime, and battery values. This is the averaged vals
# Always use the lat/lon, temp included here for the averaged data
M_lat = float(line_vals[6])
M_lon = float(line_vals[7])
x, y, z, f = get_xyz(line_vals[2:5], drifter_type)
md['lat'].append(M_lat)
md['lon'].append(M_lon)
md['id'].append(S_drifter_id)
md["sample_datetime"].append(mdt)
md['x'].append(x)
md['y'].append(y)
md['z'].append(z)
md['f'].append(f)
md['temp'].append(float(line_vals[5]))
md['num'].append(int(line_vals[1]))
md['bat'].append(bvalue)
if line_vals[0] == 'C':
# The date reported here is always the start time of the sample period
# C: Calibration header
# C 8 2011 225.12518
# C id yr decimal_julian_day
jdt = convert_julian_frac(line_vals[3], line_vals[2])
# store the calibration value
Cid = line_vals[1]
# store the datetime
Cdf = jdt
if line_vals[0] == 'c':
# calibration measurement, add this to the header value for each value
# offset the time with the measurement frequency
# the first few and last few of this cal value seem to be bad
C_count = int(line_vals[1])
cdt = Cdf + timedelta(0, C_count)
x, y, z, f = get_xyz(line_vals[2:5], drifter_type)
ctemp = float(line_vals[5])
cd["sample_datetime"].append(cdt)
cd['x'].append(x)
cd['y'].append(y)
cd['z'].append(z)
cd['temp'].append(ctemp)
cd['num'].append(C_count)
cd['cal_start_datetime'].append(Cdf)
cd['bat'].append(bvalue)
cd['lat'].append(S_lat)
cd['lon'].append(S_lon)
cd['id'].append(Cid)
cd['f'].append(f)
if line_vals[0] == 'E':
# E:
# E 12.7 0
# E battery_voltage ?
bvalue = float(line_vals[1])
cpd = pd.DataFrame.from_dict(cd, orient='columns')
cpd = cpd.set_index('sample_datetime', drop=False)
# ignore data before june 1, this will need to be tailored for each drifter
cpd = cpd[cpd.index > pd.to_datetime(launch_datetime)]
cpd = cpd[cpd.index < pd.to_datetime(end_datetime)]
cpd = cpd[cpd['bat'] > min_bat]
#remove bad latitude data
cpd = cpd[cpd['lat'] != -90.0]
cpd = cpd[cpd['lat'] != 0.0]
mpd = pd.DataFrame.from_dict(md, orient='columns')
mpd = mpd.set_index('sample_datetime', drop=False)
# ignore data before june 1, this will need to be tailored for each drifter
mpd = mpd[mpd.index > pd.to_datetime(launch_datetime)]
mpd = mpd[mpd.index < pd.to_datetime(end_datetime)]
# theoretically, APS should be good down to 4.95 V (+4.95V to +12V. Input current is 40mA.)
mpd = mpd[mpd['bat'] > min_bat]
#remove bad latitude data
mpd = mpd[mpd['lat'] != -90.0]
mpd = mpd[mpd['lat'] != 0.0]
return mpd, cpd
def get_model(dt, lats, lons):
alt = 0
# set isv to 0 (main field) because that is what it is in example ??
isv = 0
# set itype to 1 (geodectic)
itype = 1
mx,my,mz,mf,yeardec = runigrf12(dt, isv, itype, [alt], lats, lons)
return mx[0], my[0], mz[0], mf[0], yeardec
def get_model_df(df):
xs = []
ys = []
zs = []
fs = []
alt = 0
# set isv to 0 (main field) because that is what it is in example ??
isv = 0
# set itype to 1 (geodectic)
itype = 1
for i in df.index:
x,y,z,f,yr = mx,my,mz,mf,yeardec = runigrf12(i, isv, itype, alt,
df.loc[i,'lat'], df.loc[i,'lon'])
xs.append(x[0])
ys.append(y[0])
zs.append(z[0])
fs.append(f[0])
df.loc[:,'igrfx'] = xs
df.loc[:,'igrfy'] = ys
df.loc[:,'igrfz'] = zs
df.loc[:,'igrff'] = fs
return df
def to_total_field(x, y, z):
"""convert to total magnetic field"""
return np.sqrt(x**2 + y**2 + z**2)
def fix_hmr(d):
k = np.sign(d)
dd = abs(int(d))*10
# convert to hex and discard last byte
hexd = "{0:#0{1}x}".format(dd,8)[:-2]
dd = int(hexd, 16)
# convert to nT
dd = (dd*6.6667)
return dd
def get_xyz(xyz, drifter_type="APS"):
x,y,z = [float(a) for a in xyz]
if drifter_type == 'HMR':
# the APS sensors were written in the wrong format, convert to correct
x = fix_hmr(x)
y = fix_hmr(y)
z = fix_hmr(z)
f = to_total_field(x, y, z)
return x, y, z, f
def get_buoy_data(buoypd, datetimes, lats, lons):
"""
:param datetimes: np array of panda datetimes
:param lats: np array of latitudes
:param lons: np array of longitudes
"""
datetimes = pd.to_datetime(datetimes)
data = pd.DataFrame()
for dt, lat, lon in zip(datetimes, lats, lons):
buoys_sorted = find_nearest_buoy(lat, lon)
for (buoy, dis) in buoys_sorted:
# if within 300 km, use this buoy
if dis < 500:
md = buoypd.loc[buoypd.loc[:,'buoy_id'] == buoy].copy(deep=True)
md['sample_datetime'] = dt
md['mins_diff'] = abs(md['sample_datetime']-md['buoy_datetime']).astype('timedelta64[m]')
min_time = md['mins_diff'].min()
## if nearest val within x minutes, use it, otherwise, look further away
if min_time < 180:
closest = md.loc[md.loc[:,'mins_diff'] == min_time]
data = data.append(closest)
# don't bother searching other distances
break
else:
# don't search farther away in sorted list
#print('no buoys close enough to', lat, lon)
break
data.index = data['sample_datetime']
return data
def parse_raw_files(drifter_data_dir, drifter_dict):
"""parse raw drifter files into provided dictionary and write to meas/cal/list files. This function is really slow, but only needs to be called once to write the parsed files.
:param drifter_data_dir: relative path where meas/cal/list_name.txt files are stored
:param drifter_dict: dictionary with names of drifter (ie sleepy) as key
"""
buoypd = get_all_buoys()
for dname in drifter_dict.keys():
dpath = os.path.join(drifter_data_dir, 'drifter_' + dname + '.txt')
print("Loading %s and writing measured and calibration data files" %dname)
drifter_type = drifter_dict[dname]['type']
mpd, cpd = load_data(dpath, drifter_type, drifter_dict[dname]['launch'], drifter_dict[dname]['end'])
mpd = get_model_df(mpd)
bpd = get_buoy_data(buoypd, mpd.index, mpd['lat'], mpd['lon'])
# join buoy data with drifter data
mbpd = pd.merge(mpd, bpd, left_index=True, right_index=True)
mpath = os.path.join(drifter_data_dir, 'meas_' + dname + '.txt')
mbpd.to_csv(mpath, header=True, sep=' ', index=True)
drifter_dict[dname]['meas'] = mbpd
cpd = get_model_df(cpd)
bpd = get_buoy_data(buoypd, cpd.index, cpd['lat'], cpd['lon'])
# join buoy data with drifter data
cbpd = pd.merge(cpd, bpd, left_index=True, right_index=True)
cpath = os.path.join(drifter_data_dir, 'cal_' + dname + '.txt')
cbpd.to_csv(cpath, header=True, sep=' ', index=True)
drifter_dict[dname]['cal'] = cbpd
return drifter_dict
def parse_txt_files(drifter_data_dir, drifter_dict):
"""load files previously produced by parse_raw_files
:param drifter_data_dir: relative path where meas/cal/list_name.txt files are stored
:param drifter_dict: dictionary with names of drifter (ie sleepy) as key
"""
for dname in drifter_dict.keys():
print("Loading %s meas, cal, and list data files" %dname)
mpath = os.path.join(drifter_data_dir, 'meas_' + dname + '.txt')
mpd = pd.read_csv(mpath, header=0, sep=' ', parse_dates=0, index_col=0, low_memory=False)  # API: pandas.read_csv
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
        tm.assert_frame_equal(result, expected)
from flask import *
import pandas as pd
import os
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
from flask_ngrok import run_with_ngrok
import numpy as np
app = Flask(__name__)
run_with_ngrok(app)
basedir = os.path.abspath(os.path.dirname(__file__))
DIR = 'static/data/'
bitcoin_time_series = pd.read_csv(DIR + "cmc_plus_gold_fixed.csv", parse_dates = ['date'])
gtrend_time_series = pd.read_csv(DIR + "daily_gtrend_data_cmc.csv", parse_dates = ['date'])
dataset = bitcoin_time_series.copy()
dataset['gtrend'] = gtrend_time_series['bitcoin']
train_dates = dataset['date']
del gtrend_time_series
dataset = dataset.drop('date', axis = 1)
dataset = dataset.drop('index', axis = 1)
scaler = MinMaxScaler().fit(dataset)
dataset_scaled = scaler.transform(dataset)
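# NOTE: sliding_window() is called in the /predict route below, but its original
# definition is not part of this snippet. The helper sketched here is only an
# assumption of what it likely looks like for a GRU-style forecaster: it cuts the
# scaled dataset into overlapping windows of n_past timesteps (X) and the next
# value of the first feature column (y). The window length must match whatever
# the saved models were actually trained with.
def sliding_window(n_past=14, n_future=1):
    trainX, trainY = [], []
    for i in range(n_past, len(dataset_scaled) - n_future + 1):
        trainX.append(dataset_scaled[i - n_past:i, :])      # past window, all features
        trainY.append(dataset_scaled[i + n_future - 1, 0])  # next step, first column (scaled)
    return np.array(trainX), np.array(trainY)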
@app.route('/')
def index():
loss_adam = pd.read_csv('static/data/loss_ogru/loss_history.csv')
loss_adam_h = pd.read_csv('static/data/loss_ogru/loss_history_H.csv')
loss_adam_hg = pd.read_csv('static/data/loss_ogru/loss_history_HG.csv')
loss_adam_htrend = pd.read_csv('static/data/loss_ogru/loss_history_HTrend.csv')
loss_sgd = pd.read_csv('static/data/loss_ogru/loss_history_sgd.csv')
loss_sgd_h = pd.read_csv('static/data/loss_ogru/loss_history_sgd_H.csv')
loss_sgd_hg = pd.read_csv('static/data/loss_ogru/loss_history_sgd_HG.csv')
loss_sgd_htrend = pd.read_csv('static/data/loss_ogru/loss_history_sgd_HTrend.csv')
label_loss_sgd = loss_sgd['index'].values
value_loss_sgd = loss_sgd['loss'].values
value_val_sgd_h = loss_sgd_h['val_loss'].values
value_val_sgd_hg = loss_sgd_hg['val_loss'].values
value_val_sgd_htrend = loss_sgd_htrend['val_loss'].values
mean_val_sgd = loss_sgd['val_loss'].mean()
mean_val_sgd_h = loss_sgd_h['val_loss'].mean()
mean_val_sgd_htrend = loss_sgd_htrend['val_loss'].mean()
mean_val_sgd_hg = loss_sgd_hg['val_loss'].mean()
value_val_sgd = loss_sgd['val_loss'].values
label_loss_adam = loss_adam['index'].values
value_loss_adam = loss_adam['loss'].values
value_val_adam = loss_adam['val_loss'].values
value_val_adam_h = loss_adam_h['val_loss'].values
value_val_adam_hg = loss_adam_hg['val_loss'].values
value_val_adam_htrend = loss_adam_htrend['val_loss'].values
mean_val = loss_adam['val_loss'].mean()
mean_val_h = loss_adam_h['val_loss'].mean()
mean_val_htrend = loss_adam_htrend['val_loss'].mean()
mean_val_hg = loss_adam_hg['val_loss'].mean()
return render_template('home.html',
value_loss_sgd=value_loss_sgd,
label_loss_sgd=label_loss_sgd,
label_loss_adam=label_loss_adam,
value_loss_adam=value_loss_adam,
value_val_sgd=value_val_sgd,
value_val_adam=value_val_adam,
value_val_adam_h=value_val_adam_h,
value_val_adam_hg=value_val_adam_hg,
value_val_adam_htrend=value_val_adam_htrend,
mean_val = mean_val,
mean_val_h=mean_val_h,
mean_val_htrend=mean_val_htrend,
mean_val_hg=mean_val_hg,
value_val_sgd_h=value_val_sgd_h,
value_val_sgd_hg=value_val_sgd_hg,
value_val_sgd_htrend=value_val_sgd_htrend,
mean_val_sgd = mean_val_sgd,
mean_val_sgd_h=mean_val_sgd_h,
mean_val_sgd_htrend=mean_val_sgd_htrend,
mean_val_sgd_hg=mean_val_sgd_hg,
)
@app.route('/predict', methods=['POST', 'GET'])
def predict():
if request.method == 'POST':
n_days_for_prediction = int(request.get_json())
n_past = n_days_for_prediction
# us_bd = CustomBusinessDay(calendar=USFederalHolidayCalendar())
# predict_period_dates = pd.date_range(list(train_dates)[-n_past], periods=n_days_for_prediction, freq=us_bd).tolist()
# print(predict_period_dates)
# predict_period_dates = list(train_dates[-n_days_for_prediction:])
forecast_period_dates = pd.date_range(list(train_dates)[-n_past], periods=(n_days_for_prediction*2), freq='1d').tolist()
model = load_model('static/data/model/my_model_with_gtrend_gold.h5')
model_sgd = load_model('static/data/model/model_sgd_HGoldTrend.h5')
trainX, trainY = sliding_window()
prediction = model.predict(trainX[-(n_days_for_prediction + n_past):])
prediction_sgd = model_sgd.predict(trainX[-(n_days_for_prediction + n_past):])
prediction_copies = np.repeat(prediction, dataset_scaled.shape[1], axis=-1)
y_pred_future = scaler.inverse_transform(prediction_copies)[:,0]
prediction_copies_sgd = np.repeat(prediction_sgd, dataset_scaled.shape[1], axis=-1)
y_pred_future_sgd = scaler.inverse_transform(prediction_copies_sgd)[:,0]
forecast_dates = []
for time_i in forecast_period_dates:
forecast_dates.append(time_i.date())
df_forecast = pd.DataFrame({'ogru_adam':y_pred_future, 'ogru':y_pred_future_sgd, 'predicted_date': forecast_dates})
# df_forecast['Date']=pd.to_datetime(df_forecast['Date'])
original = bitcoin_time_series[['date', 'open']]
original['date']=pd.to_datetime(original['date'])
original = original.loc[original['date'] >= '2021-8-1']
original_data = bitcoin_time_series[['date', 'open']][-n_days_for_prediction:]
result = pd.concat([original_data, df_forecast], axis=1)
return result.to_json()
else:
        return 'Oops, something went wrong'
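# A minimal client call for the /predict route above (sketch; the URL is an
# assumption -- flask_ngrok prints the real public URL when the app starts):
#
#     import requests
#     resp = requests.post("http://127.0.0.1:5000/predict", json=30)  # forecast 30 days
#     forecast = resp.json()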
@app.route('/testpredict')
def testpredict():
n_past = 7
n_days_for_prediction = 30
    us_bd = CustomBusinessDay(calendar=USFederalHolidayCalendar())
from datetime import datetime
from typing import List
import pandas as pd
import pytest
from hyperwave import (
HyperwaveWeekLenghtGrouping,
HyperwavePhaseGrouper,
HyperwaveGroupingPhasePercent,
HyperwaveGroupingPhaseAggregator,
HyperwaveGroupingToPhase4,
HyperwaveGrouperByMedianSlopeIncrease,
HyperwaveGrouping,
HyperwaveGrouperSmallWeek,
)
def get_path_row(
x1: int = 0,
x1_date: datetime = datetime(2000, 1, 1),
x1_normalize: float = 0.0,
x2: int = 0,
x2_date: datetime = datetime(2000, 1, 1),
x2_normalize: float = 0.0,
y1: float = 0.0,
y1_normalize: float = 0.0,
y2: float = 0.0,
y2_normalize: float = 0.0,
m: float = 0.0,
b: float = 0.0,
m_normalize: float = 0.0,
b_normalize: float = 0.0,
angle: float = 0.0,
angle_normalize: float = 0.0,
weeks: int = 0,
mean_error: float = 0.0,
nb_is_lower: int = 0,
ratio_error_cut: float = 0.0,
ratio_slope_y1_normalize: float = 0.0,
ratio_slope_y2_normalize: float = 0.0,
):
return {
"x1": x1,
"x1_date": x1_date,
"x1_normalize": x1_normalize,
"x2": x2,
"x2_date": x2_date,
"x2_normalize": x2_normalize,
"y1": y1,
"y1_normalize": y1_normalize,
"y2": y2,
"y2_normalize": y2_normalize,
"m": m,
"b": b,
"m_normalize": m_normalize,
"b_normalize": b_normalize,
"angle": angle,
"angle_normalize": angle_normalize,
"weeks": weeks,
"mean_error": mean_error,
"nb_is_lower": nb_is_lower,
"ratio_error_cut": ratio_error_cut,
"ratio_slope_y1_normalize": ratio_slope_y1_normalize,
"ratio_slope_y2_normalize": ratio_slope_y2_normalize,
}
@pytest.mark.parametrize(
"raw_path, expected_phases, increase_factor, test_conment",
[
        ([get_path_row()], [[0]], 2.0, "one row returns the row when m_normalize is not negative"),
(
[get_path_row(), get_path_row()],
[[0, 1]],
2.0,
"Two path with m_normalize equal zero should return an array with both element",
),
(
[get_path_row(m_normalize=-0.5), get_path_row(m_normalize=-1.0)],
[],
2.0,
"Path with only negative elements should return empty array",
),
(
[get_path_row(m_normalize=-0.5), get_path_row(m_normalize=1.0)],
[[1]],
2.0,
"Path with only one positive m_normalize should retunr an array with one element",
),
(
[get_path_row(m_normalize=0.5), get_path_row(m_normalize=0.7)],
[[0, 1]],
2.0,
"Path with two positive m_normalize without increase factor should return an array with both elements id",
),
(
[get_path_row(m_normalize=0.5), get_path_row(m_normalize=1.1)],
[[0], [1]],
2.0,
"Path with two positive m_normalize with increase factor greated should return an array with two array",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
],
[[0], [1, 2]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5] should return [[0],[1, 2]]",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
],
[[0], [1, 2]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5] should return [[0],[1, 2]]",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
get_path_row(m_normalize=2.2),
],
[[0], [1, 2, 3]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5, 2.2] should return [[0],[1, 2, 3]]",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
get_path_row(m_normalize=2.4),
],
[[0], [1, 2], [3]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5, 2.4] should return [[0],[1, 2], [3]]",
),
(
[
get_path_row(m_normalize=0.5),
get_path_row(m_normalize=1.1),
get_path_row(m_normalize=1.5),
get_path_row(m_normalize=2.4),
get_path_row(m_normalize=10),
],
[[0], [1, 2], [3], [4]],
2.0,
"Path m_normalize [0.5, 1.1, 1.5, 2.4, 10] should return [[0],[1, 2], [3], [4]",
),
],
)
def test_that_grouping_return_expected_value(
    raw_path, expected_phases, increase_factor, test_comment
):
df_path = pd.DataFrame(raw_path)
hw_phase_grouper = HyperwavePhaseGrouper(increase_factor)
phases = hw_phase_grouper.group(df_path)
    assert expected_phases == phases, test_comment
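# Illustrative only: a minimal re-implementation of the grouping rule that the
# parametrized cases above describe (this is NOT the hyperwave library's code).
# Negative normalized slopes are dropped, and a new phase starts whenever a
# slope exceeds increase_factor times the first slope of the current phase.
def _group_by_increase_factor(m_normalized_slopes, increase_factor=2.0):
    phases = []
    for idx, slope in enumerate(m_normalized_slopes):
        if slope < 0:
            continue
        if phases and slope <= increase_factor * m_normalized_slopes[phases[-1][0]]:
            phases[-1].append(idx)
        else:
            phases.append([idx])
    return phases
# e.g. _group_by_increase_factor([0.5, 1.1, 1.5, 2.4]) == [[0], [1, 2], [3]]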
@pytest.mark.parametrize(
"raw_path, input_group, expected_result, group_min_week, only_group_last_phase, test_comment",
[
(
[get_path_row(weeks=4)],
[[0]],
[[0]],
10,
True,
"one path with weeks lower than should return same input",
),
(
[get_path_row(weeks=4), get_path_row(weeks=4)],
[[1]],
[[1]],
10,
True,
"path with two input path but one group should return one group",
),
(
[get_path_row(weeks=10), get_path_row(weeks=4)],
[[0], [1]],
[[0, 1]],
10,
True,
"path with two input path and two groups should return one group",
),
(
[get_path_row(weeks=10), get_path_row(weeks=4), get_path_row(weeks=3)],
[[0], [1], [2]],
[[0, 1, 2]],
10,
True,
"initial group [[0], [1], [2]] with weeks [10, 4, 3] shoud return group [[0, 1, 2]]",
),
(
[
get_path_row(weeks=10),
get_path_row(weeks=4),
get_path_row(weeks=3),
get_path_row(weeks=4),
],
[[0], [1], [2, 3]],
[[0], [1, 2, 3]],
10,
True,
"initial group [[0], [1], [2, 3]] with weeks [10, 4, 3, 4] shoud return group [[0], [1, 2, 3]]",
),
(
[
get_path_row(weeks=10),
get_path_row(weeks=4),
get_path_row(weeks=7),
get_path_row(weeks=4),
],
[[0], [1], [2], [3]],
[[0, 1], [2, 3]],
10,
False,
"initial group [[0], [1], [2, 3]] with weeks [10, 4, 3, 4] shoud return group [[0], [1, 2, 3]]",
),
(
[
get_path_row(weeks=10),
get_path_row(weeks=4),
get_path_row(weeks=7),
get_path_row(weeks=4),
],
[[0], [1], [2], [3]],
[[0], [1], [2, 3]],
10,
True,
"initial group [[0], [1], [2, 3]] with weeks [10, 4, 3, 4] shoud return group [[0], [1, 2, 3]]",
),
],
)
def test_grouping_second_step_week_base_when_all_weeks_are_enough_long(
raw_path,
input_group,
expected_result,
group_min_week,
only_group_last_phase,
test_comment,
):
df_path = pd.DataFrame(raw_path)
hw_week_lenght_grouping = HyperwaveWeekLenghtGrouping(
group_min_week, only_group_last_phase
)
result_group = hw_week_lenght_grouping.group(df_path, input_group)
assert expected_result == result_group
@pytest.mark.parametrize(
"raw_data, expected_result, percent_increase, test_comment",
[
([], [], 0.7, "Test empty array"),
(
[get_path_row(m_normalize=2.4)],
[[0]],
0.7,
"Test array with only one element",
),
(
[get_path_row(m_normalize=1), get_path_row(m_normalize=1.5)],
[[0, 1]],
1.7,
"Two elements in the same range",
),
(
[get_path_row(m_normalize=1), get_path_row(m_normalize=2.5)],
[[0], [1]],
1.7,
"Test array with two elements in different phase",
),
(
[
get_path_row(m_normalize=1),
get_path_row(m_normalize=1.5),
get_path_row(m_normalize=1.6),
get_path_row(m_normalize=3.9),
],
[[0, 1, 2], [3]],
1.7,
"Test array with 4 elements that increase into different phase",
),
(
[
get_path_row(m_normalize=0.353723),
get_path_row(m_normalize=0.476578),
get_path_row(m_normalize=1.276563),
get_path_row(m_normalize=1.601295),
get_path_row(m_normalize=7.864277),
get_path_row(m_normalize=11.429688),
get_path_row(m_normalize=11.589543),
get_path_row(m_normalize=80.007812),
],
[[0, 1], [2, 3], [4, 5, 6], [7]],
1.7,
"Test array with two elements in different phase",
),
],
)
def test_grouping_phase_percent_increase_no_increase(
raw_data, expected_result, percent_increase, test_comment
):
    df_path = pd.DataFrame(raw_data)
    hw_grouping_phase_percent = HyperwaveGroupingPhasePercent(
        percent_increase=percent_increase
    )
    result_group = hw_grouping_phase_percent.group(df_path=df_path)
assert expected_result == result_group, test_comment
@pytest.mark.parametrize(
"raw_data, expected_result, group_aggregators, test_comment ",
[
([], [], [], "Test with everything empty"),
(
[get_path_row(m_normalize=1)],
[[0]],
[HyperwaveGroupingPhasePercent()],
"Test one grouping with one row",
),
(
[get_path_row(m_normalize=1, weeks=15)],
[[0]],
[HyperwaveGroupingPhasePercent(), HyperwaveWeekLenghtGrouping(10)],
"Test grouping with two grouping",
),
],
)
def test_grouping_phase_aggregator(
raw_data, expected_result, group_aggregators, test_comment
):
    df_path = pd.DataFrame(raw_data)
import os
from pathlib import Path
from typing import List, Tuple, Optional, Sequence, Any, Union, Generator
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import penguins as pg
from penguins import dataset as ds # for type annotations
class Experiment:
"""
Generic interface for experiments.
"""
default_margin = (0.5, 0.02) # applicable for 13C experiments
# This is overridden in subclasses.
# use (0.02, 0.02) for 1H experiments
# use (0.4, 0.05) for 15N experiments
def __init__(self,
peaks: List[Tuple[float, float]],
margin: Optional[Tuple[float, float]] = None,
):
self.peaks = peaks
self.margin = margin or self.default_margin
def integrate(self,
dataset: ds.Dataset2D,
) -> np.ndarray:
# Get absolute peak intensities for a given dataset.
return np.array([dataset.integrate(peak=peak,
margin=self.margin,
mode="max")
for peak in self.peaks])
def show_peaks(self, ax=None, **kwargs) -> None:
"""
Draw red crosses corresponding to each peak on an existing Axes
instance. Useful for checking whether the peaks actually line up with
the spectrum.
If 'ax' is not provided, defaults to currently active Axes.
Other kwargs are passed to ax.scatter().
"""
if ax is None:
ax = plt.gca()
scatter_kwargs = {"color": pg.color_palette("bright")[3],
"marker": "+", "zorder": 2}
scatter_kwargs.update(kwargs)
ax.scatter([p[1] for p in self.peaks], [p[0] for p in self.peaks],
**scatter_kwargs)
@property
def df(self) -> pd.DataFrame:
"""
Return a pandas DataFrame containing all the peaks. This DF has
columns "f1" and "f2".
"""
return pd.DataFrame.from_records(self.peaks, columns=("f1", "f2"))
def rel_ints_df(self,
dataset: ds.Dataset2D,
ref_dataset: ds.Dataset2D,
label: str = "",
) -> pd.DataFrame:
"""
Construct a dataframe of relative intensities vs a reference
dataset.
This DataFrame will have columns "f1", "f2", "expt", and "int".
"""
df = pd.DataFrame()
df["int"] = self.integrate(dataset) / self.integrate(ref_dataset)
df["expt"] = label
df["f1"] = self.df["f1"]
df["f2"] = self.df["f2"]
return df
class Hmbc(Experiment):
"""
For 13C HMBC experiments. Just call hmbc(peaks, margin) to instantiate.
"""
default_margin = (0.5, 0.02)
class NHsqc(Experiment):
"""
For 15N HSQC experiments. Just call nhsqc(peaks, margin) to instantiate.
"""
default_margin = (0.4, 0.05)
class Hsqc(Experiment):
"""
For 13C HSQC experiments. The variables ch, ch2, and ch3 should be
lists of 2-tuples (f1_shift, f2_shift) which indicate, well, CH, CH2,
and CH3 peaks respectively.
None of the methods from Experiment are actually inherited.
"""
def __init__(self,
ch: List[Tuple[float, float]],
ch2: List[Tuple[float, float]],
ch3: List[Tuple[float, float]],
margin: Optional[Tuple[float, float]] = (0.5, 0.02),
):
self.ch = ch
self.ch2 = ch2
self.ch3 = ch3
self.margin = margin
@property
def peaks(self) -> List[Tuple[float, float]]:
"""
Returns a list of all peaks.
"""
return self.ch + self.ch2 + self.ch3
@property
def df(self) -> pd.DataFrame:
"""
Return a pandas DataFrame containing all the peaks. This DF has
columns "f1", "f2", and "mult".
"""
_chdf, _ch2df, _ch3df = (
pd.DataFrame.from_records(peaklist, columns=("f1", "f2"))
for peaklist in (self.ch, self.ch2, self.ch3)
)
_chdf["mult"] = "ch"
_ch2df["mult"] = "ch2"
_ch3df["mult"] = "ch3"
return pd.concat((_chdf, _ch2df, _ch3df), ignore_index=True)
def integrate(self,
dataset: ds.Dataset2D,
edited: bool = False,
) -> np.ndarray:
"""
Calculates the absolute integral of each peak in the HSQC. Assumes that
CH/CH3 is phased to negative and CH2 to positive.
"""
if edited:
# We need self.df here as it contains multiplicity information.
return np.array([dataset.integrate(peak=(peak.f1, peak.f2),
margin=self.margin,
mode=("max"
if peak.mult == "ch2"
else "min"))
for peak in self.df.itertuples()])
else:
return np.array([dataset.integrate(peak=peak,
margin=self.margin,
mode=("max"))
for peak in self.peaks])
def rel_ints_df(self,
dataset: ds.Dataset2D,
ref_dataset: ds.Dataset2D,
label: str = "",
edited: bool = False,
) -> pd.DataFrame:
"""
Construct a dataframe of relative intensities vs a reference
dataset.
This DataFrame will have columns (f1, f2, mult) just like self.df,
but will also have "expt" which is a string indicating the type of
experiment being ran, and "int" which is the relative integral vs a
reference dataset.
"""
df = pd.DataFrame()
df["int"] = (self.integrate(dataset, edited=edited) /
self.integrate(ref_dataset, edited=edited))
df["expt"] = label
df["mult"] = self.df["mult"]
df["f1"] = self.df["f1"]
df["f2"] = self.df["f2"]
return df
class HsqcCosy(Experiment):
"""
For 13C HSQC-COSY experiments. The variables hsqc and cosy should be lists
of 2-tuples (f1_shift, f2_shift) which indicate the direct (HSQC) and
indirect (HSQC-COSY) responses respectively.
None of the methods from Experiment are actually inherited.
"""
def __init__(self,
hsqc: List[Tuple[float, float]],
cosy: List[Tuple[float, float]],
margin: Optional[Tuple[float, float]] = (0.5, 0.02),
):
self.hsqc = hsqc
self.cosy = cosy
self.margin = margin
@property
def peaks(self) -> List[Tuple[float, float]]:
"""
Returns a list of all peaks.
"""
return self.hsqc + self.cosy
@property
def df(self) -> pd.DataFrame:
"""
Return a pandas DataFrame containing all the peaks. This DF has
columns "f1", "f2", and "type".
"""
hsqc_df, cosy_df = (
pd.DataFrame.from_records(peaklist, columns=("f1", "f2"))
for peaklist in (self.hsqc, self.cosy)
)
hsqc_df["type"] = "hsqc"
cosy_df["type"] = "cosy"
return pd.concat((hsqc_df, cosy_df), ignore_index=True)
def integrate(self,
dataset: ds.Dataset2D,
edited: bool = True,
) -> np.ndarray:
"""
Calculates the absolute integral of each peak in the HSQC. If editing
is enabled, assumes that HSQC peaks are positive and HSQC-COSY peaks
negative.
"""
if edited:
# We need self.df here as it contains multiplicity information.
return np.array([dataset.integrate(peak=(peak.f1, peak.f2),
margin=self.margin,
mode=("max"
if peak.type == "hsqc"
else "min"))
for peak in self.df.itertuples()])
else:
return np.array([dataset.integrate(peak=peak,
margin=self.margin,
mode=("max"))
for peak in self.peaks])
def rel_ints_df(self,
dataset: ds.Dataset2D,
ref_dataset: ds.Dataset2D,
label: str = "",
edited: bool = True,
) -> pd.DataFrame:
"""
Construct a dataframe of relative intensities vs a reference
dataset.
This DataFrame will have columns (f1, f2, mult) just like self.df,
but will also have "expt" which is a string indicating the type of
experiment being ran, and "int" which is the relative integral vs a
reference dataset.
"""
df = pd.DataFrame()
df["int"] = (self.integrate(dataset, edited=edited) /
self.integrate(ref_dataset, edited=edited))
df["expt"] = label
df["type"] = self.df["type"]
df["f1"] = self.df["f1"]
df["f2"] = self.df["f2"]
return df
class Cosy(Experiment):
"""
For COSY experiments. The variables diagonal and cross_half should be
lists of 2-tuples (f1_shift, f2_shift). cross_half should only contain
half the peaks, i.e. only at (f1, f2) and not at (f2, f1). These will
be automatically reflected.
Only integrate() is actually inherited from Experiment.
"""
def __init__(self,
diagonal: List[Tuple[float, float]],
cross_half: List[Tuple[float, float]],
margin: Optional[Tuple[float, float]] = (0.02, 0.02),
):
self.diagonal = diagonal
self.cross_half = cross_half
self.margin = margin
@property
def cross(self) -> List[Tuple[float, float]]:
cross_otherhalf = [(t[1], t[0]) for t in self.cross_half]
# All crosspeaks
return self.cross_half + cross_otherhalf
@property
def peaks(self) -> List[Tuple[float, float]]:
return self.diagonal + self.cross
@property
def df(self) -> pd.DataFrame:
"""
Return a pandas DataFrame containing all the peaks. This DF has
columns "f1", "f2", and "type".
"""
_diagdf, _crossdf = (
            pd.DataFrame.from_records(peaklist, columns=("f1", "f2"))
            for peaklist in (self.diagonal, self.cross)
        )
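if __name__ == "__main__":
    # Minimal, self-contained usage sketch for the Experiment interface above.
    # _FakeDataset is a stand-in for a penguins ds.Dataset2D (an assumption for
    # illustration only): it returns a constant "integral" for every peak, so
    # the relative intensities below come out as 2.0 for each peak.
    class _FakeDataset:
        def __init__(self, scale: float):
            self.scale = scale

        def integrate(self, peak, margin, mode):
            return self.scale

    expt = Experiment(peaks=[(55.1, 3.72), (128.4, 7.26)])  # hypothetical peaks
    rel = expt.rel_ints_df(_FakeDataset(2.0), _FakeDataset(1.0), label="demo")
    print(rel)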
'''
NMF learns topics of documents
In the video, you learned that when NMF is applied to documents, the components
correspond to topics of documents, and the NMF features reconstruct the documents
from the topics. Verify this for yourself for the NMF model that you built earlier
using the Wikipedia articles. Previously, you saw that the 3rd NMF feature value
was high for the articles about actors <NAME> and <NAME>. In this exercise,
identify the topic of the corresponding NMF component.
The NMF model you built earlier is available as model, while words is a list of the words that label the columns of the word-frequency array.
After you are done, take a moment to recognise the topic that the articles about Anne Hathaway and Denzel Washington have in common!
INSTRUCTIONS
100XP
Import pandas as pd.
Create a DataFrame components_df from model.components_, setting columns=words so that columns are labeled by the words.
Print components_df.shape to check the dimensions of the DataFrame.
Use the .iloc[] accessor on the DataFrame components_df to select row 3. Assign the result to component.
Call the .nlargest() method of component, and print the result. This gives the five words with the highest values for that component.
'''
# Import pandas
import pandas as pd
# Create a DataFrame: components_df
components_df = pd.DataFrame(model.components_, columns=words)
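# Remaining steps from the instructions above (sketch; `model` and `words` are
# assumed to already be in scope, as the exercise states):

# Print the shape of the DataFrame
print(components_df.shape)

# Select row 3: component
component = components_df.iloc[3]

# Print result of nlargest
print(component.nlargest())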
#import the pandas library and aliasing as pd
import pandas as pd
import numpy as np
# Create an Empty DataFrame
df = pd.DataFrame()
print (df)
# Create a DataFrame from Lists
data = [1,2,3,4,5]
df = pd.DataFrame(data)
print (df)
data = [['Ankit',21],['Bob',24],['Clarke',20]]
df = pd.DataFrame(data,columns=['Name','Age'],dtype=float)
print (df)
# Create a DataFrame from Dict of ndarrays / Lists
data = {'Name':['Ankit', 'Gupta', 'Steve', 'Ricky'],'Age':[21,22,29,32]}
df = pd.DataFrame(data, index=['rank1','rank2','rank3','rank4'])
print (df)
# Create a DataFrame from List of Dicts
data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]
df = pd.DataFrame(data)
print (df)
data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]
df = pd.DataFrame(data, index=['first', 'second'])
print (df)
# Create a DataFrame from Dict of Series
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
print (df)
# Column Addition
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
# Adding a new column to an existing DataFrame object with column label by passing new series
print ("Adding a new column by passing as Series:")
df['three']=pd.Series([10,20,30],index=['a','b','c'])
print (df)
print ("Adding a new column using the existing columns in DataFrame:")
df['four']=df['one']+df['three']
print (df)
# Column Deletion
print ("Deleting the first column using DEL function:")
del df['one']
print ("Deleting another column using POP function:")
df.pop('two')
print (df)
# Row Selection
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
    'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
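# The "Row Selection" example above is cut off here; a short sketch of the usual
# continuation using standard pandas indexing:
df = pd.DataFrame(d)

# Select a row by label
print(df.loc['b'])

# Select a row by integer location
print(df.iloc[2])

# Slice rows
print(df[2:4])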
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
| tm.assert_series_equal(result, exp) | pandas._testing.assert_series_equal |
import itertools
import pandas as pd
import numpy as np
from log import Log
def gen_EMA(data: pd.Series, n=20):
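    # Recursive exponential moving average seeded with the first observation.
    # Note: the smoothing factor here is alpha = 1 / (n + 1); the conventional
    # n-period EMA uses alpha = 2 / (n + 1), so this variant decays more slowly.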
alpha = 1 / (n + 1)
EMA = []
for t in range(len(data.index)):
if t == 0:
EMA_t = data.iat[t]
else:
EMA_t = alpha * data.iat[t] + (1 - alpha) * EMA[-1]
EMA.append(EMA_t)
return EMA
def gen_RSI(data: pd.Series, n=14):
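    # Relative Strength Index over an n-bar window; the first n values are padded
    # with None. Note the result is avg_up / (avg_up + avg_down), i.e. on a 0-1
    # scale rather than the conventional 0-100 scale.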
RSI = [None] * n
for t in range(n, len(data.index)):
sum_up = 0
sum_down = 0
for i in range(1, n):
sum_up += max(data.iat[t - i + 1] - data.iat[t - i], 0)
sum_down += min(data.iat[t - i + 1] - data.iat[t - i], 0)
avg_up = sum_up / n
avg_down = -sum_down / n
RSI_t = avg_up / (avg_up + avg_down)
RSI.append(RSI_t)
return pd.Series(RSI, index=data.index)
def gen_Stochastics(data: pd.Series, K_n=5, D=5, D2=3):
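    # Stochastic oscillator: %K over a K_n-bar high/low range, fast %D as a D-bar
    # average of %K, and slow %D as a D2-bar average of fast %D.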
K = [0] * K_n
D_fast = [None] * K_n
D_slow = [None] * K_n
for t in range(K_n, len(data.index)):
L = min(filter(None, data[(t + 1 - K_n):(t + 1)]))
H = max(filter(None, data[(t + 1 - K_n):(t + 1)]))
K_t = 100 * (data.iat[t] - L) / (H - L)
K.append(K_t)
D_fast_t = sum(filter(None, K[-D:])) / D
D_fast.append(D_fast_t)
D_slow_t = sum(filter(None, D_fast[-D2:])) / D2
D_slow.append(D_slow_t)
return pd.Series(K, index=data.index), \
pd.Series(D_fast, index=data.index), \
pd.Series(D_slow, index=data.index)
def gen_Williams_R(data: pd.Series, n=10):
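    # Williams %R over an n-bar window: (H - close) / (H - L) * -100, bounded in [-100, 0].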
Williams_R = [None] * n
for t in range(n, len(data.index)):
L = min(filter(None, data[(t + 1 - n):(t + 1)]))
H = max(filter(None, data[(t + 1 - n):(t + 1)]))
R_t = (H - data.iat[t]) / (H - L) * -100
Williams_R.append(R_t)
return pd.Series(Williams_R, index=data.index)
def gen_price_rate_of_change(data: pd.Series, n=10):
proc = [None] * n
for t in range(n, len(data.index)):
proc_t = (data.iat[t] - data.iat[t - n]) / data.iat[t - n]
proc.append(proc_t)
return pd.Series(proc, index=data.index)
def gen_on_balance_volume(data: pd.DataFrame):
obv = [0]
for t in range(1, len(data.index)):
if data["Close"][t] > data["Close"][t - 1]:
obv_t = obv[-1] + data["Volume"][t]
elif data["Close"][t] < data["Close"][t - 1]:
obv_t = obv[-1] - data["Volume"][t]
else:
obv_t = obv[-1]
obv.append(obv_t)
return pd.Series(obv, index=data.index)
def gen_MACD(data: pd.Series, n_fast=12, n_slow=26, n_signal=9):
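    # MACD line = EMA(n_fast) - EMA(n_slow); the signal line is an EMA of the MACD
    # line. Defaults (12, 26, 9) are the conventional MACD parameters; as in gen_EMA
    # above, the smoothing factors are 1 / (n + 1).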
alpha_fast = 1 / (n_fast + 1)
alpha_slow = 1 / (n_slow + 1)
alpha_signal = 1 / (n_signal + 1)
EMA_fast = []
EMA_slow = []
MACD = []
Signal = []
for t in range(len(data.index)):
if t == 0:
EMA_fast_t = data.iat[t]
EMA_slow_t = data.iat[t]
MACD_t = EMA_fast_t - EMA_slow_t
Signal_t = MACD_t
else:
EMA_fast_t = alpha_fast * data.iat[t] + (1 - alpha_fast) * EMA_fast[-1]
EMA_slow_t = alpha_slow * data.iat[t] + (1 - alpha_slow) * EMA_slow[-1]
MACD_t = EMA_fast_t - EMA_slow_t
Signal_t = alpha_signal * MACD_t + (1 - alpha_signal) * Signal[-1]
EMA_fast.append(EMA_fast_t)
EMA_slow.append(EMA_slow_t)
MACD.append(MACD_t)
Signal.append(Signal_t)
return pd.Series(MACD, index=data.index), \
pd.Series(Signal, index=data.index)
def gen_CCI(data: pd.DataFrame, n=20):
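    # Commodity Channel Index: CCI = (TP - SMA(TP, n)) / (0.015 * mean deviation),
    # where TP = (high + low + close) / 3 is the typical price and 0.015 is the
    # usual CCI scaling constant. The first n values are padded with None.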
CCI = [None] * n
TP = []
for t in range(len(data.index)):
TP_t = (data["High"].iat[t] + data["Low"].iat[t] + data["Close"].iat[t]) / 3
TP.append(TP_t)
if t >= n:
SMA = np.mean(TP[-n:])
MD = sum(abs(TP[-n:] - SMA)) / n
CCI_t = (TP_t - SMA) / (0.015 * MD)
CCI.append(CCI_t)
return pd.Series(CCI, index=data.index)
def gen_ATR(data: pd.DataFrame, n=14):
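    # Average True Range: the first ATR value (at t == n) is a simple mean of the
    # first n true ranges; afterwards Wilder's smoothing
    # ATR_t = (ATR_{t-1} * (n - 1) + TR_t) / n is applied.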
TR = []
ATR = [None] * n
for t in range(len(data.index)):
if t == 0:
continue
TR_t = max(data["High"].iat[t] - data["Low"].iat[t],
abs(data["High"].iat[t] - data["Close"].iat[t - 1]),
abs(data["Low"].iat[t] - data["Close"].iat[t - 1]))
TR.append(TR_t)
if t == n:
ATR_t = np.mean(TR)
ATR.append(ATR_t)
elif t > n:
ATR_t = (ATR[-1] * (n - 1) + TR_t) / n
ATR.append(ATR_t)
return pd.Series(ATR, index=data.index)
def gen_ADL(data: pd.DataFrame):
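    # Accumulation/Distribution Line: the money-flow multiplier
    # ((close - low) - (high - close)) / (high - low), scaled by volume and
    # accumulated over time.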
ADL = []
for t in range(len(data.index)):
ADL_t = ((data["Close"].iat[t] - data["Low"].iat[t]) - \
(data["High"].iat[t] - data["Close"].iat[t])) / \
(data["High"].iat[t] - data["Low"].iat[t]) * data["Volume"].iat[t]
if t == 0:
ADL.append(ADL_t)
else:
ADL.append(ADL[-1] + ADL_t)
return pd.Series(ADL, index=data.index)
def gen_returns(data: pd.DataFrame):
returns = [None]
log_returns = [None]
annualized_log_returns = [None]
monthly_log_returns = [None] * 21
quarterly_log_returns = [None] * 63
yearly_log_returns = [None] * 252
for t in range(1, len(data.index)):
return_t = (data["Close"].iat[t] - data["Close"].iat[t - 1]) / data["Close"].iat[t - 1]
log_return_t = np.log(data["Close"].iat[t] / data["Close"].iat[t - 1])
if t >= 21:
monthly_log_return_t = np.log(data["Close"].iat[t] / data["Close"].iat[t - 21])
monthly_log_returns.append(monthly_log_return_t)
if t >= 63:
quarterly_log_return_t = np.log(data["Close"].iat[t] / data["Close"].iat[t - 63])
quarterly_log_returns.append(quarterly_log_return_t)
if t >= 252:
yearly_log_return_t = np.log(data["Close"].iat[t] / data["Close"].iat[t - 252])
yearly_log_returns.append(yearly_log_return_t)
returns.append(return_t)
log_returns.append(log_return_t)
annualized_log_returns.append(252 * log_return_t)
return pd.Series(returns, index=data.index), \
pd.Series(log_returns, index=data.index), \
pd.Series(annualized_log_returns, index=data.index), \
pd.Series(monthly_log_returns, index=data.index), \
pd.Series(quarterly_log_returns, index=data.index), \
pd.Series(yearly_log_returns, index=data.index)
def gen_returns2(data: pd.DataFrame, delta=1):
returns = []
log_returns = []
ann_log_returns = []
for t in range(len(data.index) - delta):
return_t = (data["Close"].iat[t + delta] - data["Close"].iat[t]) / data["Close"].iat[t]
log_return_t = np.log(data["Close"].iat[t + delta] / data["Close"].iat[t])
ann_log_returns_t = log_return_t / delta * 252
returns.append(return_t)
log_returns.append(log_return_t)
ann_log_returns.append(ann_log_returns_t)
for i in range(delta):
returns.append(None)
log_returns.append(None)
ann_log_returns.append(None)
return pd.Series(returns, index=data.index), \
pd.Series(log_returns, index=data.index), \
pd.Series(ann_log_returns, index=data.index)
def gen_SimpleVola(data: pd.Series, days=14):
days_year = 252
vola = [None] * days
ann_vola = [None] * days
log_returns = [None]
ann_log_returns = [None]
for t in range(1, len(data.index)):
log_return_t = np.log(data.iat[t] / data.iat[t - 1])
log_returns.append(log_return_t)
ann_log_returns.append(log_return_t * days_year)
if t >= days:
vola.append(np.std(log_returns[-days:]))
ann_vola.append(np.std(ann_log_returns[-days:]))
return pd.Series(vola, index=data.index), \
pd.Series(ann_vola, index=data.index)
def gen_EWMA_Vola(data: pd.Series, n=14):
lambda_ = 0.94
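    # 0.94 is the standard RiskMetrics decay factor for daily data.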
days_year = 252
ewma_ann_vola = [None] * n
ann_log_returns = [None]
weights = []
for i in range(n):
weight = (1 - lambda_) * lambda_ ** i
weights.append(weight)
weights.reverse()
weights = np.array(weights)
var_t2_prev = None
for t in range(1, len(data.index)):
ann_log_return_t = np.log(data.iat[t] / data.iat[t - 1]) * days_year
ann_log_returns.append(ann_log_return_t)
if t >= n:
mean_t = np.mean(ann_log_returns[-n:])
var_t = sum(weights * (np.array(ann_log_returns[-n:]) - mean_t) ** 2) / n
if var_t2_prev is None:
var_t2 = var_t
else:
var_t2 = lambda_ * var_t2_prev + (1 - lambda_) * ann_log_return_t ** 2
var_t2_prev = var_t2
ewma_ann_vola.append(np.sqrt(var_t2))
return pd.Series(ewma_ann_vola, index=data.index)
def gen_YZ_Vola(data: pd.DataFrame, days=14):
days_year = 252
RS_fac = [None] # Rogers-Satchell
ON_fac = [None] # ON = overnight volatility
OC_fac = [None] # OC = open to close volatility
sigma_YZ = [None] * days
k = 0.34 / (1.34 + (days + 1) / (days - 1))
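    # Yang-Zhang combination: var_YZ = var_ON + k * var_OC + (1 - k) * var_RS,
    # with the weight k taken from the Yang-Zhang formulation (see sigma_YZ_t below).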
for t in range(1, len(data.index)):
RS_fac.append(np.log(data["High"].iat[t] / data["Close"].iat[t]) *
np.log(data["High"].iat[t] / data["Open"].iat[t]) +
np.log(data["Low"].iat[t] / data["Close"].iat[t]) *
np.log(data["Low"].iat[t] / data["Open"].iat[t]))
ON_fac.append(np.log(data["Open"].iat[t] / data["Close"].iat[t - 1]))
OC_fac.append(np.log(data["Close"].iat[t] / data["Open"].iat[t]))
if t >= days:
var_RS = days_year / days * np.sum(RS_fac[-days:])
ON_mean = np.mean(ON_fac[-days:])
var_ON = 1 / (days - 1) * np.sum((np.array(ON_fac[-days:]) - ON_mean) ** 2)
OC_mean = np.mean(OC_fac[-days:])
var_OC = 1 / (days - 1) * np.sum((np.array(OC_fac[-days:]) - OC_mean) ** 2)
sigma_YZ_t = np.sqrt(days_year) * np.sqrt(var_ON + k * var_OC + (1 - k) * var_RS)
sigma_YZ.append(sigma_YZ_t)
return pd.Series(sigma_YZ, index=data.index)
def gen_binary_response(data: pd.DataFrame, returns):
binary = [None]
for t in range(1, len(returns)):
if np.isnan(returns[t]):
binary.append(None)
elif returns[t] > 0:
binary.append(1)
else:
binary.append(0)
return pd.Series(binary, index=data.index)
def gen_tertiary_response(data: pd.DataFrame, returns, vola, days):
tertiary = [None]
upper_bound = 1/np.log(1 + days)
mid_bound = upper_bound / 3
for t in range(1, len(returns)):
if np.isnan(vola[t]) or np.isnan(returns[t]):
tertiary.append(None)
elif returns[t] > mid_bound * vola[t]:
tertiary.append(1)
elif -mid_bound * vola[t] < returns[t] <= mid_bound * vola[t]:
tertiary.append(0)
elif returns[t] <= -mid_bound * vola[t]:
tertiary.append(-1)
else:
raise ValueError("Invalid range for return: {}".format(returns[t]))
return pd.Series(tertiary, index=data.index)
def gen_multinomial_response(data: pd.DataFrame, returns, vola, days):
multinomial = [None]
upper_bound = 1/np.log(1 + days)
mid_bound = upper_bound / 3
for t in range(1, len(returns)):
if np.isnan(vola[t]) or np.isnan(returns[t]):
multinomial.append(None)
elif returns[t] > upper_bound * vola[t]:
multinomial.append(2)
elif mid_bound * vola[t] < returns[t] <= upper_bound * vola[t]:
multinomial.append(1)
elif -mid_bound * vola[t] < returns[t] <= mid_bound * vola[t]:
multinomial.append(0)
elif -upper_bound * vola[t] < returns[t] <= -mid_bound * vola[t]:
multinomial.append(-1)
elif returns[t] <= -upper_bound * vola[t]:
multinomial.append(-2)
else:
raise ValueError("Invalid range for return: {}".format(returns[t]))
return | pd.Series(multinomial, index=data.index) | pandas.Series |
# -*- coding: utf-8; py-indent-offset:4 -*-
import os, sys
import datetime as dt
import tabulate as tb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ..core import get_cn_fund_list, get_cn_fund_daily, get_cn_fund_manager, get_cn_fund_company, get_all_symbol_name, get_daily
from ..utils import dict_from_df, datetime_today, sort_with_options, filter_with_options, date_range_from_options, range_from_options, csv_xlsx_from_options, symbols_from_params
from ..core import LANG
def cli_fund_help():
syntax_tips = '''Syntax:
__argv0__ fund update <all | symbols | symbols.csv>
__argv0__ fund list <all | symbols | symbols.csv> [-include=... -exclude=... -same=...]
__argv0__ fund manager [<keyword>] [-s | -d] [-sortby=...] [-desc] [-filter_column=...-...]
__argv0__ fund company [<keyword>]
__argv0__ fund eval <all | symbols | symbols.csv> [-sortby=...] [-desc] [-filter_column=...-...]
__argv0__ fund plot <symbols | symbols.csv> [<options>]
__argv0__ fund backtest <all | symbols | symbols.csv> [-ref=...] [-days=...] [-date=yyyymmdd-yyyymmdd]
Options:
-sortby=<col> .................. sort by the column
-sharpe=2.5- ................... sharpe value between <from> to <to>
-drawdown_max=-20 .............. drawdown_max between <from> to <to>
-volatility=-20 ................ volatility between <from> to <to>
-out=file.csv .................. export fund list to .csv file
-out=file.xlsx ................. export fund data to .xlsx file
-s ............................. display symbol of the funds managed
-d ............................. display symbol and name of the funds managed
Example:
__argv0__ fund list
__argv0__ fund list -include=广发 -exclude=债 -out=output/myfunds.csv
__argv0__ fund pool 1.csv 2.csv -exclude=3.csv -same=4.csv -out=5.csv
__argv0__ fund update data/myfunds.csv
__argv0__ fund company 华安
__argv0__ fund manager -belongto=华夏基金
__argv0__ fund eval all -days=365 -sortby=sharpe -desc -limit=20 -out=output/top20_funds.xlsx
__argv0__ fund plot 002943 005669 000209 -days=365
__argv0__ fund plot data/funds.csv -days=365
__argv0__ fund plot data/funds.csv -years=3 -mix
__argv0__ fund backtest all -year=2018 -mix
__argv0__ fund backtest all -year=2010-2020 -mix
'''.replace('__argv0__',os.path.basename(sys.argv[0]))
print( syntax_tips )
def get_fund_symbol_name():
df = get_cn_fund_list(check_date= datetime_today())
return dict_from_df(df, 'symbol', 'name')
def get_fund_name_symbol():
df = get_cn_fund_list(check_date= datetime_today())
return dict_from_df(df, 'name', 'symbol')
def get_fund_company_mapping():
df = get_cn_fund_manager(check_date= datetime_today())
return dict_from_df(df, 'fund', 'company')
def get_manager_size_mapping():
df = get_cn_fund_manager(check_date= datetime_today())
df['manager'] = df['company'] + df['name']
return dict_from_df(df, 'manager', 'size')
def get_fund_manager_mapping():
df = get_cn_fund_manager(check_date= datetime_today())
fund_manager = {}
for i, row in df.iterrows():
name = row['name']
fund = row['fund']
if fund in fund_manager:
if name not in fund_manager[ fund ]:
fund_manager[ fund ].append( name )
pass
else:
fund_manager[ fund ] = [ name ]
return fund_manager
def get_manager_fundname_mapping():
df = get_cn_fund_manager(check_date= datetime_today())
manager_fund = {}
for i, row in df.iterrows():
name = row['company'] + ' ' + row['name']
fund = row['fund']
if name in manager_fund:
manager_fund[ name ].append( fund )
else:
manager_fund[ name ] = [ fund ]
return manager_fund
def get_manager_fundsymbol_mapping():
fund_symbol = dict_from_df(get_cn_fund_list(check_date= datetime_today()), 'name', 'symbol')
df = get_cn_fund_manager(check_date= datetime_today())
manager_fund = {}
for i, row in df.iterrows():
name = row['company'] + row['name']
fund = row['fund']
symbol = fund_symbol[fund] if (fund in fund_symbol) else ''
if symbol == '':
continue
if name in manager_fund:
manager_fund[ name ].append( symbol )
else:
manager_fund[ name ] = [ symbol ]
return manager_fund
def get_manager_fund_mapping():
fund_symbol = dict_from_df(get_cn_fund_list(check_date= datetime_today()), 'name', 'symbol')
df = get_cn_fund_manager(check_date= datetime_today())
manager_fund = {}
for i, row in df.iterrows():
name = row['company'] + ' ' + row['name']
fund = row['fund']
symbol = fund_symbol[fund] if (fund in fund_symbol) else ''
if symbol == '':
continue
if name in manager_fund:
manager_fund[ name ].append( symbol + ' - ' + fund )
else:
manager_fund[ name ] = [ symbol + ' - ' + fund ]
return manager_fund
# hiquant fund list
# hiquant fund list -include=多因子
def cli_fund_list(params, options):
df = get_cn_fund_list(check_date= datetime_today())
selected = total = df.shape[0]
if len(params) > 0:
symbols = symbols_from_params(params)
df = df[ df['symbol'].isin(symbols) ]
for option in options:
if option.startswith('-exclude='):
keywords = option.replace('-exclude=','').split(',')
for k in keywords:
df = df[ ~ df['name'].str.contains(k) ]
pass
elif option.startswith('-include='):
keywords = option.replace('-include=','').split(',')
filters = None
for k in keywords:
filter = df['name'].str.contains(k, na=False)
if filters is None:
filters = filter
else:
filters = filters | filter
df = df[ filters ]
pass
elif option.startswith('-belongto='):
keyword = option.replace('-belongto=','')
if option.endswith('.csv'):
df_filter = pd.read_csv(keyword, dtype=str)
companies = df_filter['company'].tolist()
else:
companies = [ keyword ]
df_fund_manager = get_cn_fund_manager(check_date= datetime_today())
df_fund_manager = df_fund_manager[ df_fund_manager['company'].isin(companies) ]
funds = list(set(df_fund_manager['fund'].tolist()))
df = df[ df['name'].isin(funds) ]
pass
elif option.startswith('-managedby='):
keyword = option.replace('-managedby=','')
if option.endswith('.csv'):
df_filter = pd.read_csv(keyword, dtype=str)
df_filter['manager'] = df_filter['company'] + df_filter['name']
managers = df_filter['manager'].tolist()
else:
managers = [ keyword ]
df_fund_manager = get_cn_fund_manager(check_date= datetime_today())
df_fund_manager['manager'] = df_fund_manager['company'] + df_fund_manager['name']
df_fund_manager = df_fund_manager[ df_fund_manager['manager'].isin(managers) ]
funds = list(set(df_fund_manager['fund'].tolist()))
df = df[ df['name'].isin(funds) ]
pass
pass
df = filter_with_options(df, options)
df = sort_with_options(df, options, by_default='symbol')
selected = df.shape[0]
print( tb.tabulate(df, headers='keys') )
print( selected, 'of', total, 'funds selected.')
out_csv_file, out_xls_file = csv_xlsx_from_options(options)
if out_csv_file:
df = df[['symbol', 'name']]
df.to_csv(out_csv_file, index= False)
print('Exported to:', out_csv_file)
print( tb.tabulate(df, headers='keys') )
if '-update' in options:
cli_fund_update(df['symbol'].tolist(), options)
if '-eval' in options:
cli_fund_eval(df['symbol'].tolist(), options)
return
if '-plot' in options:
cli_fund_plot(df['symbol'].tolist(), options + ['-man'])
# hiquant fund pool params -out=myfunds.csv
# hiquant fund pool params -same=other.csv -out=myfunds.csv
# hiquant fund pool params -exclude=other.csv -out=myfunds.csv
def cli_fund_pool(params, options):
df = get_cn_fund_list(check_date= datetime_today())
symbols = symbols_from_params(params)
df = df[ df['symbol'].isin(symbols) ].reset_index(drop= True)
for option in options:
if option.startswith('-same='):
other_arg = option.replace('-same=','')
other_symbols = symbols_from_params( [ other_arg ])
df = df[ df['symbol'].isin(other_symbols) ]
elif option.startswith('-exclude='):
other_arg = option.replace('-exclude=','')
other_symbols = symbols_from_params( [ other_arg ])
df = df[ ~ df['symbol'].isin(other_symbols) ]
print( tb.tabulate(df, headers='keys') )
df = filter_with_options(df, options)
df = sort_with_options(df, options, by_default='symbol')
range_from, range_to = range_from_options(options)
limit = range_to - range_from
if limit > 0:
df = df.head(limit)
out_csv_file, out_xlsx_file = csv_xlsx_from_options(options)
if out_csv_file:
df = df[['symbol', 'name']]
df.to_csv(out_csv_file, index= False)
print('Exported to:', out_csv_file)
if out_xlsx_file:
df = df[['symbol', 'name']]
df.to_excel(out_xlsx_file, index= False)
print('Exported to:', out_xlsx_file)
def cli_fund_company(params, options):
df = get_cn_fund_company()
limit = 0
yeartop = ''
manager_out_csv = ''
for k in options:
if k.startswith('-limit='):
limit = int(k.replace('-limit=',''))
if k.startswith('-yeartop='):
yeartop = k.replace('-yeartop=','')
if k.startswith('-manager_out=') and k.endswith('.csv'):
manager_out_csv = k.replace('-manager_out=','')
if yeartop:
df_top_managers = cli_fund_manager([], ['-yeartop='+yeartop])
df_yeartop = df_top_managers[['company']].groupby(['company']).size().reset_index(name='yeartopn')
company_yeartop = dict_from_df(df_yeartop, 'company', 'yeartopn')
df['yeartopn'] = [company_yeartop[c] if (c in company_yeartop) else 0 for c in df['company'].tolist()]
company_managers = {}
df_top_managers = df_top_managers.sort_values(by= 'best_return', ascending= False)
for i, row in df_top_managers.iterrows():
manager = row['name']
company = row['company']
if company in company_managers:
company_managers[company].append(manager)
else:
company_managers[company] = [ manager ]
df['names'] = ''
for i, row in df.iterrows():
company = row['company']
if company in company_managers:
names = ','.join( company_managers[company] )
                df.at[i, 'names'] = names
if len(params) > 0:
if '.csv' in params[0]:
company_names = pd.read_csv(params[0], dtype=str)['company'].tolist()
df = df[ df['company'].isin(company_names) ]
else:
keyword = params[0]
df = df[ df['company'].str.contains(keyword, na=False) ]
selected = total = df.shape[0]
df = filter_with_options(df, options)
for k in options:
if k.startswith('-sortby='):
df = sort_with_options(df, options, by_default='managers')
if limit > 0:
df = df.head(limit)
selected = df.shape[0]
df = df.reset_index(drop= True)
print( tb.tabulate(df, headers='keys') )
print( selected, 'of', total, 'fund companies.')
out_csv_file, out_xls_file = csv_xlsx_from_options(options)
if manager_out_csv:
table = []
for i, row in df.iterrows():
company = row['company']
names = row['names'].split(',')
for name in names:
table.append([company, name])
df_manager = pd.DataFrame(table, columns=['company','name'])
df_manager.to_csv(manager_out_csv, index=False)
print('Managers exported to:', manager_out_csv)
if out_csv_file:
df_com = df[['company']]
df_com.to_csv(out_csv_file, index=False)
print( tb.tabulate(df_com, headers='keys') )
print('Exported to:', out_csv_file)
if out_xls_file:
df_com = df.rename(columns= {
'company_start': '成立日期',
'size': '管理规模\n(亿)',
'funds': '基金\n总数',
'managers': '基金经理\n人数',
'yeartopn': '业绩前列\n经理人数',
'names': '业绩优秀 基金经理 姓名',
})
del df_com['update_date']
df_com.to_excel(excel_writer= out_xls_file)
print( tb.tabulate(df_com, headers='keys') )
print('Exported to:', out_xls_file)
if '-plot' in options:
df_company_tmp = df.copy()
for i, row in df_company_tmp.iterrows():
company = row['company']
cli_fund_list([], options + ['-belongto=' + company, '-exclude=C,债,FOF,QDII,LOF', '-eval', '-one_per_manager', '-limit=10', '-png=output/' + company + '.png'])
def get_fund_area(name):
fund_areas = {
'QDII': ['QDII','美国','全球','现钞','现汇','人民币','纳斯达克','标普'],
'ETF': ['ETF','指数','联接'],
'债券': ['债'],
'量化': ['量化'],
'新能源': ['能源','双碳','低碳','碳中和','新经济','环保','环境','气候','智能汽车'],
'高端制造': ['制造','智造','战略','新兴产业'],
'信息技术': ['信息','互联网','芯片','半导体','集成电路','云计算'],
'医疗': ['医疗','养老','医药','健康'],
'军工': ['军工','国防','安全'],
'消费': ['消费','品质','白酒'],
'周期': ['周期','资源','钢铁','有色','金融','地产'],
'中小盘': ['中小盘','成长','创新'],
'价值': ['蓝筹','价值','龙头','优势','核心'],
'灵活配置': ['灵活配置','均衡'],
}
for k in fund_areas:
keywords = fund_areas[k]
for kk in keywords:
if kk in name:
return k
return ''
def cli_fund_manager(params, options):
df = get_cn_fund_manager(check_date= datetime_today())
selected = total = df.shape[0]
if len(params) > 0:
keyword = params[0]
if ',' in keyword:
keyword = keyword.replace(',', '')
if keyword.endswith('.csv') or keyword.endswith('.xlsx'):
df_filter = pd.read_csv(keyword, dtype= str) if keyword.endswith('.csv') else pd.read_excel(keyword, dtype= str)
if 'name' in df_filter.columns:
df_filter['manager'] = df_filter['company'] + df_filter['name']
df1 = df.copy()
df1['manager'] = df['company'] + df['name']
df = df1[ df1['manager'].isin(df_filter['manager'].tolist()) ].drop(columns=['manager'])
else:
df = df[ df['company'].isin(df_filter['company'].tolist()) ]
else:
df1 = df.copy()
df1['keywords'] = df1['company'] + df1['name'] + ' ' + df1['fund']
df = df1[ df1['keywords'].str.contains(keyword, na=False) ].drop(columns=['keywords'])
yeartop = ''
limit = 0
belongto = ''
for k in options:
if k.startswith('-limit='):
limit = int(k.replace('-limit=',''))
if k.startswith('-yeartop='):
yeartop = k.replace('-yeartop=', '')
if k.startswith('-fund='):
fund = k.replace('-fund=','')
df = df[ df['fund'].str.contains(fund, na=False) ]
if k.startswith('-belongto='):
belongto = k.replace('-belongto=', '')
df_company = get_cn_fund_company()
company_managers = dict_from_df(df_company, 'company', 'managers')
company_funds = dict_from_df(df_company, 'company', 'funds')
group = '-f' not in options
if group and (df.shape[0] > 0):
df_tmp = df.drop(columns=['fund'])
table = []
name = ''
for i, row in df_tmp.iterrows():
c = row['company']
manager = c + row['name']
if name == manager:
continue
else:
name = manager
data = list(row.values)
managers = company_managers[c] if (c in company_managers) else 0
funds = company_funds[c] if (c in company_funds) else 0
data.insert(2, managers)
data.insert(3, funds)
table.append( data )
cols = list(row.keys())
cols.insert(2, 'managers')
cols.insert(3, 'funds')
df = pd.DataFrame(table, columns=cols)
df['annual'] = round((np.power((df['best_return'] * 0.01 + 1), 1.0/(np.maximum(365.0,df['days'])/365.0)) - 1.0) * 100.0, 1)
if yeartop:
df1 = df[ df['days'] >= 3650 ].sort_values(by='best_return', ascending=False)
if '%' in yeartop:
yeartopn = int(yeartop.replace('%','')) * df1.shape[0] // 100
else:
yeartopn = int(yeartop)
df1 = df1.head(yeartopn)
for i in range(9,0,-1):
df2 = df[ (df['days'] >= (i*365)) & (df['days'] < ((i+1))*365) ].sort_values(by='best_return', ascending=False)
if '%' in yeartop:
yeartopn = int(yeartop.replace('%','')) * df2.shape[0] // 100
else:
yeartopn = int(yeartop)
df2 = df2.head( yeartopn )
df1 = pd.concat([df1, df2], ignore_index=True)
df = df1
df.insert(5, 'years', round(df['days'] / 365.0, 1))
selected = total = df.shape[0]
df = filter_with_options(df, options)
if belongto:
if belongto.endswith('.csv'):
belongto = pd.read_csv(belongto, dtype= str)['company'].tolist()
elif ',' in belongto:
belongto = belongto.split(',')
else:
belongto = [ belongto ]
df = df[ df['company'].isin(belongto) ]
for k in options:
if k.startswith('-sortby='):
df = sort_with_options(df, options, by_default='best_return')
break
if limit > 0:
df = df.head(limit)
if 'fund' in df.columns:
fund_name_symbol = get_fund_name_symbol()
df.insert(2, 'symbol', [(fund_name_symbol[fund] if fund in fund_name_symbol else '') for fund in df['fund'].tolist()])
df = df[ df['symbol'] != '' ]
elif ('-s' in options) and ('name'in df.columns):
manager_fundsymbol = get_manager_fundsymbol_mapping()
managers = (df['company'] + df['name']).tolist()
df['symbol'] = [(','.join(manager_fundsymbol[manager]) if manager in manager_fundsymbol else '') for manager in managers]
elif ('-sd' in options) and ('name'in df.columns):
manager_fund = get_manager_fund_mapping()
managers = (df['company'] + df['name']).tolist()
df['fund'] = [('\n'.join(manager_fund[manager]) if manager in manager_fund else '') for manager in managers]
df['area'] = df['fund'].apply(get_fund_area)
df = df.reset_index(drop= True)
selected = df.shape[0]
print( tb.tabulate(df, headers='keys') )
print( selected, 'of', total, 'selected.')
out_csv_file, out_xls_file = csv_xlsx_from_options(options)
if out_csv_file:
df_csv = df[['name','company']]
df_csv.to_csv(out_csv_file, index= False)
print( tb.tabulate(df_csv, headers='keys') )
print('Exported to:', out_csv_file)
if out_xls_file:
if 'days' in df.columns:
df = df.drop(columns=['days'])
df = df.rename(columns= {
'managers': '基金经理人数',
'funds': '基金总数',
'fund': '基金',
'area': '投资方向',
'years': '管理年限',
'size': '基金规模',
'best_return': '最佳回报',
'annual': '年化收益',
})
df.to_excel(excel_writer= out_xls_file)
print( tb.tabulate(df, headers='keys') )
print('Exported to:', out_xls_file)
if '-plot' in options:
for i, row in df.iterrows():
manager = row['company'] + row['name']
cli_fund_show([manager], ['-png=output/' + manager + '.png'])
return df
def cli_fund_read_fund_symbols(excel_file):
if excel_file.endswith('.csv'):
df = pd.read_csv(excel_file, dtype=str)
elif excel_file.endswith('.xlsx'):
df = | pd.read_excel(excel_file, dtype=str) | pandas.read_excel |
import pandas as pd
import datetime
import numpy as np
import icd
def get_age(row):
"""Calculate the age of patient by row
Arg:
row: the row of pandas dataframe.
return the patient age
"""
raw_age = row['DOD'].year - row['DOB'].year
if (row['DOD'].month < row['DOB'].month) or ((row['DOD'].month == row['DOB'].month) and (row['DOD'].day < row['DOB'].day)):
return raw_age - 1
else:
return raw_age
mimic_patients = 'mimic_csv/PATIENTS.csv'
mimic_note_events = 'mimic_csv/NOTEEVENTS.csv'
mimic_admissions = 'mimic_csv/ADMISSIONS.csv'
mimic_diagnoses = 'mimic_csv/DIAGNOSES_ICD.csv'
patient = pd.read_csv(mimic_patients)
patient['DOD'] = pd.to_datetime(patient['DOD']).dt.date
patient['DOB'] = | pd.to_datetime(patient['DOB']) | pandas.to_datetime |
#!/usr/bin/python
import sys, os;
import argparse;
from os.path import expanduser;
import pandas as pd;
import math;
from datetime import datetime as dt;
from datetime import timedelta;
__author__ = "<NAME>"
def main():
parser = argparse.ArgumentParser(description="This script normalizes the Binance buy history to a simpler format for other scripts to process");
parser.add_argument("-bh", "--buyhistory", type=str, help="The input xlsx file cotaining your Binance buy history", required=False, default = './buy-history.xlsx');
parser.add_argument("-nh", "--normalizedhistory", type=str, help="The output xlsx file cotaining your normalized trade history", required=False, default = './buy-history-normalized.xlsx');
parser.add_argument("-fx", "--foreignexchange", type=str, help="The CAD-to-USD exchange rate chart", required=False, default = './trade/cad-usd.xlsx');
parser.add_argument("-v", "--verbose", help="Whether to output verbose output messages", required=False, default=False);
args = parser.parse_args();
print("Input Buy History file: ", args.buyhistory);
print("Input Currency Exchange rate file: ", args.foreignexchange);
print("Output Normalized History file: ", args.normalizedhistory);
print("Verbosity of log messages: ", args.verbose);
buyhistoryDfs = pd.read_excel(args.buyhistory, sheet_name="sheet1")
inputFxDfs = | pd.read_excel(args.foreignexchange, sheet_name="sheet1") | pandas.read_excel |
from datetime import timedelta
import numpy as np
import pandas as pd
import pickle
def generate_data(df, freq: str, scenario: int, regr_vars = None,
multiplier = None,
baseline = None,
look_back = None,
look_ahead = None):
    '''
    Build a feature matrix X and a label series y from `df`.

    df: DataFrame holding the regressor columns plus a 'target_data' column
        (indoor temperature) from which the labels are derived.
    freq: either 'D' (hourly data aggregated to daily features) or 'H' (hourly features).
    scenario: daily labelling rule; 1 -> flag days whose maximum indoor temperature
        exceeds the ASHRAE adaptive-comfort upper limit, 2 -> flag days with any
        hour above 27 deg C.
    regr_vars, multiplier, baseline, look_back, look_ahead: optional per-variable
        lists selecting the regressors, controlling how hourly values are aggregated
        to daily values, shifting baselines, and adding lagged / lead copies.
    '''
X = df
y = df['target_data']
X = X.drop(columns=['target_data'])
if regr_vars:
regression_variables = regr_vars
else:
regression_variables = list(X.columns)
if multiplier:
multiplier = multiplier
else:
multiplier = list(np.ones(len(regression_variables), dtype=int))
if baseline:
baseline_values = baseline
else:
baseline_values = list(np.zeros(len(regression_variables),dtype=int))
if look_back:
look_back = look_back
else:
look_back = list(np.zeros(len(regression_variables),dtype=int))
if look_ahead:
look_ahead = look_ahead
else:
look_ahead = list(np.zeros(len(regression_variables),dtype=int))
if freq == 'D':
# Synchronize X and y data by ensuring consistency of timestamps and all
# 'na' values dropped
X = X[regression_variables]
X['y_data']=y
X = X.dropna()
y=X['y_data']
X = X.drop(columns=['y_data'], axis=1)
Xp = X.copy() # Use 'Xp' as a temporary dataframe for processing to follow
for i in range(len(regression_variables)): # For each regression_variable
if baseline_values[i]>0: # Apply shift in data values based on baseline_values list
Xp[regression_variables[i]]=Xp[regression_variables[i]]-baseline_values[i]
if multiplier[i]>=1: # Take daily mean of hourly data or daily mean-of-squared hourly data as per multiplier list
if multiplier[i]==2:
Xp[regression_variables[i]]=Xp[regression_variables[i]]**2
Xp[regression_variables[i]]=Xp[regression_variables[i]].resample('D').sum()
elif multiplier[i]==-999: # Take daily mean of hourly data
Xp[regression_variables[i]]=Xp[regression_variables[i]].resample('D').mean()
elif multiplier[i]==-888: # Take daily mean, max, and minimum of hourly data
maxstr = regression_variables[i]+'_max'
minstr = regression_variables[i]+'_min'
regression_variables.append(maxstr)
look_back.append(look_back[i])
look_back.append(look_back[i])
look_ahead.append(look_ahead[i])
look_ahead.append(look_ahead[i])
Xp[maxstr] = Xp[regression_variables[i]].resample('D').max()
regression_variables.append(minstr)
Xp[minstr] = Xp[regression_variables[i]].resample('D').min()
Xp[regression_variables[i]]=Xp[regression_variables[i]].resample('D').mean()
elif multiplier[i]==-777: # Take daily mean, and max of hourly data
maxstr = regression_variables[i]+'_max'
regression_variables.append(maxstr)
look_back.append(look_back[i])
look_ahead.append(look_ahead[i])
Xp[maxstr] = Xp[regression_variables[i]].resample('D').max()
Xp[regression_variables[i]]=Xp[regression_variables[i]].resample('D').mean()
Xp=Xp.resample('D').sum() # Xp will not be fully resampled to daily values, so this cleans everything up.
# Apply look ahead and look back values
for i in range(len(regression_variables)):
if look_back[i]>0:
for x in range(look_back[i]):
header_str = 'last_'+regression_variables[i]+'_'+str(x+1)
Xp[header_str]=Xp[regression_variables[i]].shift(x+1)
if look_ahead[i]>0:
for x in range(look_ahead[i]):
header_str = 'next_'+regression_variables[i]+str(x+1)
Xp[header_str]=Xp[regression_variables[i]].shift(-x-1)
X = Xp # Reframe X based on Xp
if scenario == 1:
# Apply ASHRAE Adaptive Comfort model criteria to classify hot days vs. non-hot days
# The Adaptive Comfort model is a standard model used to predict the minimum and maximum indoor air temperatures
# that are typically considered by building occupants to yield comfortable indoor conditions. It applies
# only to naturally-ventilated buildings, which is fine as this is what CIRS is.
# It is a simple model, whereby Threshold_indoor_temp = f(Outdoor_Temp) only
# See the CBE thermal comfort tool for more info: http://comfort.cbe.berkeley.edu
#
        # For this iteration, we're using the equation that denotes the upper bound of the 90% acceptability
# limit for indoor temperature, meaning that 10% of typical building occupants will be uncomfortable
# at indoor levels above what's stated. We evaluate the equation based on mean monthly outdoor air temperature:
# Threshold_operative_temperature = 0.31 * Mean_monthly_outdoor_T + 20.3 [all in deg C]
#
        # The code below determines, for each day, whether the indoor air temperature at any hour of the
        # day exceeds the threshold temperature limit (from which the number of hot days in a summer follows);
#
        # As the adaptive comfort model is based on operative temperature, and we do not yet have this knowledge in full,
# we will assume that the daytime operative temperature in CIRS at peak hot-weather hours is 1 deg C above
# the measured air temperature. This will be verified at a future date.
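        # Worked example of the quoted 90% upper limit (illustration only): for a
        # mean monthly outdoor temperature of 20 deg C, the threshold operative
        # temperature is 0.31 * 20 + 20.3 = 26.5 deg C. Note that the implementation
        # below uses coefficients 0.33 and 20.8, i.e. a slightly higher threshold
        # than the 0.31 / 20.3 formula quoted above.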
ACupper = X['temp'].copy()
ACupper=ACupper.resample('M').mean().resample('D').ffill().reset_index()
ACupper=ACupper.set_index(pd.DatetimeIndex(ACupper['index']))
ACupper=ACupper.drop(columns=['index'])
ACupper=ACupper.multiply(0.33).add(20.8)
ACexceed=0-ACupper.sub(y.resample('D').max(),axis=0)
ACexceed[ACexceed<=0]=0
ACexceed[ACexceed>0]=1
yp = ACexceed
elif scenario == 2:
yp=y[y>27].resample('D').count()
yp[yp>0]=1
# Re-synchronize X and y data by ensuring consistency of timestamps and all 'na' values dropped
X['y_data']=yp
X=X.dropna()
y=X['y_data']
# if corr_plot:
# import matplotlib.pyplot as plt
# import seaborn as sns
# plt.figure()
# sns.heatmap(X.corr())
# X = X.dropna()
# y=X['y_data']
# # Print correlation values to help hint at what regression parameters to choose
# CorrMatrix=pd.DataFrame(X.corr()['y_data'])
# print(CorrMatrix.sort_values(by=['y_data']))
#
X = X.drop(columns=['y_data'], axis=1)
if freq == 'H':
X_historyKeys=['solar_radiation','temp','wind_dir','hum_ratio','hours',
'windspeed']
X_lookback=[6,24,4,4,0,2]#,2]
X_lookahead=[0,2,2,0,0,0]#,2]
X_drop=[0,0,0,0,0,0]
for i in range(len(X_historyKeys)):
for j in range(X_lookback[i]):
header_str='last_'+X_historyKeys[i]+'_'+str(j+1)
X[header_str]=X[X_historyKeys[i]].shift(j+1)
for j in range(X_lookahead[i]):
header_str='next_'+X_historyKeys[i]+'_'+str(j+1)
X[header_str]=X[X_historyKeys[i]].shift(-1-j)
if X_drop[i]==1:
X=X.drop(columns=[X_historyKeys[i]])
# # Add in is weekend, rolling std features
# weekends = np.where(X.index.dayofweek-5>=0, 1, 0)
# X['is_weekend'] = weekends
# X['rolling_std_4'] = X['temp'].rolling(4).std()
# X['rolling_std_3'] = X['temp'].rolling(3).std()
# X['rolling_std_2'] = X['temp'].rolling(2).std()
# X['rolling_std_mean_3'] = X['temp'].rolling(3).std().rolling(3).mean()
# X['temp_gradient'] = np.gradient(X['temp'].values)
#
# # Add if previous value exceeds 25 degrees
# X['future_exceedence'] = np.where(X['temp'].shift(-2)>=27, 1, 0)
# X['prev_exceedence'] = np.where(X['temp'].shift(2)>=27, 1, 0)
#
# # Add last 3 hours of experienced indoor temperature
#
# X['hist_indoor'] = X['indoorTemp'].shift(3)
# X['hist_indoor_diff'] = X['indoorTemp'].shift(-3).diff(2)
#
# new_regressors = ['is_weekend', 'rolling_std_mean_3', 'future_exceedence','hist_indoor', 'temp_gradient']
X['y_data']=y
# if corr_plot:
# import matplotlib.pyplot as plt
# import seaborn as sns
# plt.figure()
# sns.heatmap(X.corr())
# X = X.dropna()
# y=X['y_data']
# # Print correlation values to help hint at what regression parameters
# # to choose
# CorrMatrix=pd.DataFrame(X.corr()['y_data'])
# print(CorrMatrix.sort_values(by=['y_data']))
#
X = X.drop(columns=['y_data'], axis=1)
X=X.iloc[2:]
y=y.iloc[2:]
return X,y
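# --- Illustrative sketch (added for clarity; not part of the original pipeline) ---
# The scenario-1 comments above describe the ASHRAE adaptive comfort upper limit as a
# linear function of mean monthly outdoor temperature. The helper below just shows that
# calculation on a toy daily series, using the 0.31 / 20.3 coefficients quoted in the
# comment (the pipeline itself applies slightly different constants).
def _adaptive_comfort_upper_limit_example():
    import pandas as pd
    # one week of hypothetical mean outdoor temperatures (deg C)
    outdoor = pd.Series([18.0, 19.5, 21.0, 22.5, 24.0, 25.5, 27.0],
                        index=pd.date_range('2019-07-01', periods=7, freq='D'))
    # 90% acceptability upper limit: 0.31 * T_out + 20.3
    return outdoor.multiply(0.31).add(20.3)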
def toTensor(trainX, trainY, testX, testY):
    # scikit-learn (preprocessing) and PyTorch imports for normalizing data and building tensors
from sklearn import preprocessing
import torch
    # Normalize all X data to the [-1, 1] range using scikit-learn's MinMaxScaler
scaler=preprocessing.MinMaxScaler(feature_range=(-1,1)).fit(trainX)
x_scaled = scaler.transform(trainX)
rescaled_X_train=pd.DataFrame(x_scaled, columns=trainX.columns,
index=trainX.index)
trainY[trainY<1]=-1
# Create torch tensors from training datasets
train_x = torch.from_numpy(rescaled_X_train.values).float().contiguous().cuda()
train_y = torch.from_numpy(trainY.values).float().contiguous().cuda()
# Normalize test dataset using same normalization range applied to the training dataset
x_test_scaled = scaler.transform(testX)
rescaled_X_test= | pd.DataFrame(x_test_scaled, columns=testX.columns, index=testX.index) | pandas.DataFrame |
import pandas as pd
import argparse
import pickle
from collections import defaultdict
COL2Label = {0:'transcript', 1: 'dna', 2: 'protein'}
parser = argparse.ArgumentParser(description='Variant Results.')
parser.add_argument('--results_file', type = str, required = True, help = 'paths results')
parser.add_argument('--output_path', type = str, default = 'variant_classification_dataset.csv')
parser.add_argument('--splits', type = str, nargs = '+', default = ['train', 'dev', 'test'])
if __name__ == "__main__":
args = parser.parse_args()
results = pickle.load(open(args.results_file, 'rb'))
summary = defaultdict(list)
for mode in args.splits:
split_size = len(results['{}_stats'.format(mode)]['{}_strings'.format(mode)])
golds = [COL2Label[i] for i in results['{}_stats'.format(mode)]['{}_golds'.format(mode)] ]
preds = [COL2Label[i] for i in results['{}_stats'.format(mode)]['{}_preds'.format(mode)] ]
summary['VariantName'].extend(results['{}_stats'.format(mode)]['{}_strings'.format(mode)])
summary['DatasetSplit'].extend([mode]*split_size)
summary['TrueLabel'].extend(golds)
summary['PredictedLabel'].extend(preds)
predicted_correctly = results['{}_stats'.format(mode)]['{}_preds'.format(mode)] == results['{}_stats'.format(mode)]['{}_golds'.format(mode)]
summary['PredictedCorrectly'].extend(predicted_correctly)
summary = | pd.DataFrame(summary) | pandas.DataFrame |
# coding: utf-8
import pandas as pd
from pandas import Series,DataFrame
import numpy as np
import itertools
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
from collections import Counter
import re
import datetime as dt
from datetime import date
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import string
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from string import punctuation
from nltk.tokenize import TweetTokenizer
from nltk import tokenize
from wordcloud import WordCloud
from PIL import Image
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.cluster import MiniBatchKMeans
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.preprocessing import scale
from sklearn.linear_model import LogisticRegression
from sklearn import model_selection
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.feature_selection import SelectPercentile
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc,precision_score, accuracy_score, recall_score, f1_score
from scipy import interp
import bokeh.plotting as bp
from bokeh.models import HoverTool, BoxSelectTool
from bokeh.plotting import figure, show, output_notebook
from bokeh.io import push_notebook, show, output_notebook
import bokeh.plotting as bplt
import lda
import pyLDAvis
import pyLDAvis.gensim
import warnings
warnings.filterwarnings("ignore")
import logging
import gensim
from gensim import corpora, models, similarities
from gensim.models.word2vec import Word2Vec
from gensim.models.doc2vec import Doc2Vec,TaggedDocument
from gensim.models.ldamodel import LdaModel
from copy import deepcopy
from pprint import pprint
from keras.models import Sequential
from keras.layers import Dense, Dropout, SimpleRNN, LSTM, Activation
from keras.callbacks import EarlyStopping
from keras.preprocessing import sequence
import pickle
import os
print(os.getcwd())
# Importing tweets from csv into dataframe
# (loading the tweet data from csv into a dataframe)
try:
tweets = pd.read_csv('bezrobocie_tweets_15.08.2017.csv', names = ['username', 'date', 'retweets', 'favorites', 'text', 'geo', 'mentions', 'hashtags', 'id', 'permalink'], sep=";",
skiprows=1, encoding='utf-8')
except ValueError as exc:
    raise ValueError('CSV parse error - %s' % exc)
print(tweets.head())
tweets.text[1]
# Removing duplicates from dataframe
# (removing duplicate tweets from the dataframe)
#print('before', len(tweets)) # 21069
tweets.drop_duplicates(['text'], inplace=True)
print(len(tweets)) # 20803
# Separating the time variable by hour, day, month and year for further analysis using datetime
# (splitting the date variable into hour, year and month)
tweets['date'] = | pd.to_datetime(tweets['date']) | pandas.to_datetime |
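# --- Illustrative sketch (added; not part of the original script) ---
# The comment above mentions splitting the date column into hour, day, month and year.
# With a datetime64 column this is usually done through the .dt accessor, e.g.:
def _split_datetime_example():
    import pandas as pd
    df = pd.DataFrame({'date': pd.to_datetime(['2017-08-15 10:30', '2017-08-16 22:05'])})
    df['hour'] = df['date'].dt.hour
    df['day'] = df['date'].dt.day
    df['month'] = df['date'].dt.month
    df['year'] = df['date'].dt.year
    return df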
import sqlite3
import pandas as pd
import numpy as np
from datetime import datetime
class Rankings:
def run(self, database):
print("Starting product ranking...")
start_time = datetime.now()
conn = sqlite3.connect(database)
query = conn.execute("SELECT * From reviews")
cols = [column[0] for column in query.description]
results = pd.DataFrame.from_records(data=query.fetchall(), columns=cols)
print("import completed!")
# Dataframe #1 #
# # average "overall" rating grouped by ASIN
# average_by_asin = results.groupby(['asin'])['overall'].mean().to_frame().reset_index()
# #average "overall" rating for all ASINs
# average_overall = results['overall'].mean()
# # create final dataframe
# final = average_by_asin
# final['average_overall'] = average_overall
# #print(average_by_asin)
# # show columns
# #print(list(average_by_asin.columns.values))
# # Rank (0 being the worst, N being the best) for this ASIN based on ave. overall score
# final['rank'] = final['overall'].rank(ascending=True, method='dense')
# #final['rank'] = final['rank']
# # https://www.geeksforgeeks.org/percentile-rank-of-a-column-in-a-pandas-dataframe/
# # Percentile (0<= p <=1) for this ASIN. The ASIN with the highest overall will be ranked 1, the lowest ASIN will be ranked 0.
# final['percentile'] = final['overall'].rank(pct = True)
# print(final)
# # write result to csv
# final.to_csv('output.csv', encoding='utf-8', index=False)
# Dataframe #2 #
# average "overall" rating grouped by ASIN
average_by_asin = results.groupby(['asin'])['overall'].mean().to_frame().reset_index()
average_by_asin['overall_average'] = average_by_asin['overall']
average_by_asin = average_by_asin[['asin', 'overall_average']]
print("average_by_asin completed!")
# count grouped by ASIN
count = results.groupby(['asin'])['overall'].count().to_frame().reset_index()
count['overall_count'] = count['overall']
count = count[['asin', 'overall_count']]
print("count completed!")
# stdev of overall grouped by ASIN
stdev = results.groupby(['asin'])['overall'].agg(np.std, ddof=0).to_frame().reset_index()
stdev['overall_stdev'] = stdev['overall']
stdev = stdev[['asin', 'overall_stdev']]
print("stdev completed!")
# length of all of the review texts
length_of_text = results
length_of_text['average_length_of_review_text'] = length_of_text['review_text'].str.split().str.len()
length_of_text = length_of_text[['asin', 'average_length_of_review_text']]
length_of_text = length_of_text.groupby(['asin'])[
'average_length_of_review_text'].mean().to_frame().reset_index()
print("length_of_text completed!")
# create final dataframe
final = pd.merge( | pd.merge(average_by_asin, count, on='asin') | pandas.merge |
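# --- Illustrative sketch (added; not part of the original class) ---
# The ranking code above groups reviews by ASIN, aggregates them, and (in the commented-out
# "Dataframe #1" section) ranks products by their average score. A minimal version of that
# groupby / rank / percentile pattern on toy data:
def _rank_example():
    import pandas as pd
    reviews = pd.DataFrame({'asin': ['A', 'A', 'B', 'C'],
                            'overall': [5.0, 4.0, 3.0, 4.5]})
    avg = reviews.groupby('asin')['overall'].mean().reset_index()
    avg['rank'] = avg['overall'].rank(ascending=True, method='dense')
    avg['percentile'] = avg['overall'].rank(pct=True)
    return avg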
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, concat
import pandas._testing as tm
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
tm.assert_frame_equal(result, expected)
def test_empty_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(StringIO(data), dtype={"one": "u1"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(
StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}
)
expected = DataFrame(
{"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
)
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two,three"
result = parser.read_csv(
StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}
)
exp_idx = MultiIndex.from_arrays(
[np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)],
names=["one", "two"],
)
expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
with pytest.raises(ValueError, match="Duplicate names"):
data = ""
parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})
@pytest.mark.parametrize(
"dtype,expected",
[
(np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),
(
"category",
DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[]),
),
(
{"a": "category", "b": "category"},
DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[]),
),
("datetime64[ns]", DataFrame(columns=["a", "b"], dtype="datetime64[ns]")),
(
"timedelta64[ns]",
DataFrame(
{
"a": Series([], dtype="timedelta64[ns]"),
"b": | Series([], dtype="timedelta64[ns]") | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.finance as mpf
'''
Read the OHLCV data for one stock over the specified years.
Inputs: baseDir and stockCode are strings; startYear and yearNum are integers.
Output: a DataFrame.
'''
def readWSDFile(baseDir, stockCode, startYear, yearNum=1):
    # Parse the date column
dateparse = lambda x: | pd.datetime.strptime(x, '%Y-%m-%d') | pandas.datetime.strptime |
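# --- Illustrative sketch (added; not part of the original module) ---
# readWSDFile above parses dates with a strptime lambda. Assuming the first column of the
# CSV holds '%Y-%m-%d' dates, an equivalent approach is to let read_csv build the
# DatetimeIndex directly:
def _read_dated_csv_example(path):
    import pandas as pd
    # parse_dates/index_col turn the first column into a DatetimeIndex
    return pd.read_csv(path, parse_dates=[0], index_col=0)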
# coding=utf-8
from datetime import datetime
from wit import Wit
from string import Template
from time import sleep
from collections import namedtuple
from pathlib import Path
import pandas as pd
import deepcut
import os
import glob
import pickle
import config
toq_key = config.toq_key
say_key = config.say_key
sub_key = config.sub_key
sec_key = config.sec_key
who_key = config.who_key
now_here = os.getcwd()
def get_file_name(dir_file):
fn = os.path.basename(dir_file)
fn_alone = os.path.splitext(fn)[0]
return fn_alone
# df is the table; 'extend' is a word indicating how this table has been changed, and it is also the folder name used to store this file
def export_file(old_table, new_table, extend):
file_name = os.path.basename(old_table)
fn_no_extension = os.path.splitext(file_name)[0]
path_here = os.getcwd()
    # Export the df table
directory = os.path.join(path_here, extend)
if not os.path.exists(directory):
os.makedirs(directory)
export_file_dir = os.path.join(directory, fn_no_extension + '_{!s}.csv'.format(extend))
new_table.to_csv(export_file_dir, sep='\t', encoding='utf-8')
print('ส่งออกไฟล์ {!s} แล้ว'.format(fn_no_extension + '_{!s}.csv'.format(extend)))
# Start by importing the csv produced from the txt exported from LINE,
# then transform it into a table made up of the message time (time), the sender's name (name) and the message text (text)
def clean_table(file_path):
    # chat is the table loaded from the csv file that we are going to clean
chat = pd.read_csv(file_path)
    # chat_mod is the table derived from chat, but with new column names
chat_mod = pd.DataFrame({'time': chat.ix[:, 0], 'name': chat.ix[:, 1], 'text': chat.ix[:, 2]})
    # If the incoming value, after cutting off the first five characters, is a date, only the date part is returned
    # Any other value that does not meet this condition is left untouched and returned as-is
def validate(date_text):
try:
datetime.strptime(date_text[5:], '%d/%m/%Y')
b = date_text[5:]
return b
except ValueError:
return date_text
    # Check whether the incoming value is in the '%H:%M' format
def tm(t):
try:
datetime.strptime(t, '%H:%M')
return True
except ValueError:
return False
# ตรวจสอบข้อมูลที่ส่งเข้ามาว่าอยู่ในรูปแบบ '%d/%m/%Y' หรือไม่
def date(d):
try:
datetime.strptime(d, '%d/%m/%Y')
return True
except ValueError:
return False
    # For values in the time column that contain a day name, cut the day name off; leave the rest untouched, then put everything into a list
na = []
for vela in chat_mod['time']:
k = validate(str(vela))
na.append(k)
    # Walk through the items in the list na
for s in na:
        # If the list item is in the '%H:%M' format
if tm(s):
            # If the item in na just before s is in the '%d/%m/%Y' format
if date(na[na.index(s) - 1]):
                # replace the item at position s with the preceding item, followed by a space, followed by s as before
na[na.index(s)] = na[na.index(s) - 1] + " " + s
            # If the item in na just before s, with its last 6 characters removed, is in the '%d/%m/%Y' format
elif date(na[na.index(s) - 1][:-6]):
                # replace the item at position s with the preceding item (last 6 characters removed), followed by a space,
                # followed by s as before
na[na.index(s)] = na[na.index(s) - 1][:-6] + " " + s
            # If the value is in any other format, do nothing
else:
pass
    # When this is done, na holds items in the %d/%m/%Y %H:%M format
    # time_mod is the column with the date placed in front of the time, in the %d/%m/%Y %H:%M format
chat_mod['time_mod'] = pd.Series(na)
    # fd is a table with 3 columns
fd = chat_mod[['time_mod', 'name', 'text']]
    # dfd is the table with rows whose text column is empty dropped
dfd = fd.dropna(subset=['text'])
    # These lists come from the individual columns of dfd
a1 = dfd['time_mod'].tolist()
a2 = dfd['name'].tolist()
a3 = dfd['text'].tolist()
    # Build a new table named df from a1, a2 and a3
df = pd.DataFrame({'time': a1, 'name': a2, 'text': a3})
export_file(file_path, df, 'cleaned')
return df
def time_inter(ct):
b1 = pd.Series(ct['time'])
b2 = pd.Series(ct['time'])
temp_vela = '%d/%m/%Y %H:%M'
la = 0
minute_set = []
for _ in b1:
try:
c1 = datetime.strptime(b1[la - 1], temp_vela)
c2 = datetime.strptime(b2[la], temp_vela)
d1 = c2 - c1
minute_set.append(d1)
la = la + 1
except KeyError:
c1 = datetime.strptime(b1[la], temp_vela)
d1 = c1 - c1
minute_set.append(d1)
la = la + 1
    # The time_ans column shows the time elapsed before replying, as days followed by a 00:00:00-style time
time_ans = pd.Series(minute_set)
    # The time_min column shows the time elapsed before replying, in minutes
time = pd.DatetimeIndex(time_ans)
time_min = (time.day - 1) * 24 * 60 + time.hour * 60 + time.minute
return time_min
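# --- Illustrative sketch (added; not part of the original script) ---
# time_inter above rebuilds minutes from a DatetimeIndex of timedeltas; on a single pair of
# timestamps the same figure can be obtained from Timedelta.total_seconds():
def _timedelta_minutes_example():
    import pandas as pd
    delta = pd.Timestamp('2017-08-16 11:30') - pd.Timestamp('2017-08-15 10:00')
    return int(delta.total_seconds() // 60)  # 1530 minutes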
def sender_num(ct):
    # Convert the senders' names into numbers
ra = []
name_set = set(ct['name'].tolist())
name_list = list(name_set)
for each_name in ct['name']:
ra.append(name_list.index(each_name))
return ra
def numb_text(ct):
sii = 1
yaa = []
x = ct['name'].tolist()
lal = 0
    # x is the list of message senders
    # consider the sender of each message
for each_name in x:
        # each_name is the sender under consideration
        # na2 is the element that comes just before it
na2 = x[lal - 1]
        # If the current sender is the same as the sender of the previous message
if each_name == na2:
            # append the current value of sii to yaa
yaa.append(sii)
        # If the current sender is not the same as the sender of the previous message
elif each_name != na2:
            # reset sii to 1 and append it to yaa
sii = 1
yaa.append(sii)
        # Update sii, ready to go into yaa if the next message comes from the same sender
sii = sii + 1
        # Update lal, which is used to locate the previous message's sender
lal = lal + 1
return yaa
def word_separation(text):
# custom_dict = '/Users/bigmorning/Desktop/myword.txt'
sep_text = deepcut.tokenize(text)
join_sep_text = " ".join(sep_text)
return join_sep_text
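# --- Illustrative sketch (added; not part of the original script) ---
# word_separation tokenises a Thai string with deepcut and rejoins the tokens with spaces,
# so that the Wit.ai calls below receive word-separated text. Exact tokenisation depends on
# the deepcut model, so the output shown in the comment is only indicative.
def _word_separation_example():
    return word_separation('สวัสดีครับ')  # e.g. 'สวัสดี ครับ'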
def extract_value(inp_text, wit_token):
understanding = Wit(wit_token)
deep = understanding.message(inp_text)
try:
intent_value = deep['data'][0]['__wit__legacy_response']['entities']['intent'][0]['value']
except KeyError:
try:
intent_value = deep['entities']['intent'][0]['value']
except KeyError:
intent_value = deep['entities']
return intent_value
def show_progress(mal, l):
try:
s0 = Template('เพิ่มค่า $value ในเซต $set')
s1 = s0.substitute(value=mal, set=l)
except TypeError:
s0 = Template('เพิ่มค่า $value ในเซต $set')
s1 = s0.substitute(value=str(mal), set=l)
return print(s1)
def load_keep(extend, file_path, sv_wcs, sv_secs, sv_scs, sv_ws, sv_ts, sv_ss):
directory = os.path.join(now_here, extend, get_file_name(file_path) + '_keep.txt')
if not os.path.exists(directory):
with open(directory, "wb") as fp:
word_count_set = sv_wcs
sen_count_set = sv_secs
sub_count_set = sv_scs
who_set = sv_ws
toq_set = sv_ts
say_set = sv_ss
pickle.dump((word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set), fp)
return word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set
else:
with open(directory, "rb") as fp:
word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set = pickle.load(fp)
return word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set
def save_keep(extend, file_path, word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set, n):
directory = os.path.join(now_here, extend, get_file_name(file_path) + '_keep.txt')
if n % 5 == 0:
with open(directory, "wb") as fp:
pickle.dump((word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set), fp)
else:
pass
def initial_assignment(file_path, ct):
ia = namedtuple('type', 'wordCount senCount sAppear menWho senType doType')
text = ct['text'].tolist()
word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set = load_keep('analyse',
file_path,
[], [], [], [], [], [])
for n, r in enumerate(text):
if n == len(word_count_set):
print('เริ่มวิเคราะห์ประโยคที่ {!s} : {!s}'.format(str(n), r))
sep_word = word_separation(r)
            # Count the words in the text box
word_count = len(sep_word.split())
word_count_set.append(word_count)
show_progress(word_count, 'word_count_set')
            # How many sentences the text box contains: 0, 1, or more than 1
sen_count = extract_value(sep_word, sec_key)
sen_count_set.append(sen_count)
show_progress(sen_count, 'sen_count_set')
            # Whether the sentence states its subject
sub_count = extract_value(sep_word, sub_key)
sub_count_set.append(sub_count)
show_progress(sub_count, 'sub_count_set')
            # Whether this sentence talks about the sender, the conversation partner, both, or something else
who = extract_value(sep_word, who_key)
who_set.append(who)
show_progress(who, 'who_set')
            # Whether the sentence is a statement or a question
toq = extract_value(sep_word, toq_key)
toq_set.append(toq)
show_progress(toq, 'toq_set')
            # The type of action expressed by the sentence
say = extract_value(sep_word, say_key)
say_set.append(say)
show_progress(say, 'say_set')
print("----------เสร็จสิ้นแถวที่ " + str(n) + " ----------")
save_keep('analyse', file_path, word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set, n)
df = pd.DataFrame({'name': ct['name'],
'text': ct['text'],
'wordCount': word_count_set,
'senCount': sen_count_set,
'sAppear': sub_count_set,
'menWho': who_set,
'senType': toq_set,
'doType': say_set})
export_file(file_path, df, 'analyse')
return ia(wordCount=word_count_set,
senCount=sen_count_set,
sAppear=sub_count_set,
menWho=who_set,
senType=toq_set,
doType=say_set)
def som(file_path, ct):
ia = namedtuple('type', 'wordCount senCount sAppear menWho senType doType')
    # Work out which csv file to pull in for repair
    # It is loaded from the file carrying the 'analyse' extend
ext = 'analyse'
directory = os.path.join(now_here, ext)
    # Path of the csv file that will be pulled in for repair
call_csv = os.path.join(directory, get_file_name(file_path) + '_{!s}.csv'.format(ext))
    # Open the file
last_csv = pd.read_csv(call_csv, sep='\t')
    # Convert each column into a list
    # then call load_keep
word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set = load_keep('anCom',
file_path,
last_csv['wordCount'].tolist(),
last_csv['senCount'].tolist(),
last_csv['sAppear'].tolist(),
last_csv['menWho'].tolist(),
last_csv['senType'].tolist(),
last_csv['doType'].tolist())
an_key = {'senCount': sec_key, 'sAppear': sub_key, 'menWho': who_key, 'senType': toq_key, 'doType': say_key}
type_set = {'senCount': sen_count_set, 'sAppear': sub_count_set, 'menWho': who_set, 'senType': toq_set,
'doType': say_set}
text = ct['text'].tolist()
for v in list(type_set.values()):
for n, i in enumerate(v):
while i == str({}) or i == {}:
key = list(type_set.keys())[list(type_set.values()).index(v)]
token_word_list = deepcut.tokenize(text[n])
token_word = ' '.join(token_word_list)
v[n] = extract_value(token_word, an_key[key])
print('เปลี่ยนเซตว่างใน {!s} แถวที่ {!s} เป็น {!s}'.format(key, str(n), v[n]))
if v[n] == {}:
print('ทำอีกรอบ พิจารณา ประโยค : ' + token_word)
sleep(20)
else:
save_keep('anCom',
file_path,
word_count_set,
sen_count_set,
sub_count_set,
who_set,
toq_set,
say_set,
5)
i = v[n]
df = pd.DataFrame({'name': ct['name'],
'text': ct['text'],
'wordCount': word_count_set,
'senCount': sen_count_set,
'sAppear': sub_count_set,
'menWho': who_set,
'senType': toq_set,
'doType': say_set})
export_file(file_path, df, 'anCom')
return ia(wordCount=word_count_set,
senCount=sen_count_set,
sAppear=sub_count_set,
menWho=who_set,
senType=toq_set,
doType=say_set)
all_file = glob.glob(now_here + '/csv_line/*.csv')
all_file_analyse = os.listdir(now_here + '/analyse')
all_file_anCom = os.listdir(now_here + '/anCom')
# Finish the analyse step first
for file in all_file:
new_table = clean_table(file)
min_time = time_inter(new_table)
num_sender = sender_num(new_table)
num_text = numb_text(new_table)
if get_file_name(file) + '_analyse.csv' not in all_file_analyse:
print('ไม่มี {!s} ในโฟลเดอร์ analyse ดังนั้นจะเริ่ม analyse'.format(get_file_name(file)))
path = Path(file)
print('กำลังดู ' + str(path))
initial_assignment(file, new_table)
elif get_file_name(file) + '_anCom.csv' not in all_file_anCom:
print('มี {!s} ในโฟลเดอร์ analyse แล้ว ดังนั้นจะไม่ analyse อีก'.format(get_file_name(file)))
print('ไม่มี {!s} ในโฟลเดอร์ anCom ดังนั้นจะเริ่ม analyse'.format(get_file_name(file)))
analyse = som(file, new_table)
new_table['min'] = pd.Series(min_time)
new_table['num_sender'] = pd.Series(num_sender)
new_table['num_text'] = pd.Series(num_text)
new_table['word_count'] = pd.Series(analyse.wordCount)
new_table['sen_count'] = pd.Series(analyse.senCount)
new_table['s_appear'] = | pd.Series(analyse.sAppear) | pandas.Series |
"""
Pull information using python ColecticaPortal api
"""
from io import StringIO
import xml.etree.ElementTree as ET
import pandas as pd
import json
import api
def remove_xml_ns(xml):
"""
Read xml from string, remove namespaces, return root
"""
it = ET.iterparse(StringIO(xml))
for _, el in it:
prefix, has_namespace, postfix = el.tag.partition('}')
if has_namespace:
el.tag = postfix # strip all namespaces
root = it.root
return root
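# --- Illustrative sketch (added; not part of the original module) ---
# remove_xml_ns strips the '{namespace}' prefix that ElementTree adds to every tag, so the
# parsers below can use plain tag names such as 'URN' in their XPath queries:
def _remove_xml_ns_example():
    xml = '<a:Fragment xmlns:a="ddi:instance"><a:URN>urn:example:1</a:URN></a:Fragment>'
    root = remove_xml_ns(xml)
    return root.find('.//URN').text  # 'urn:example:1'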
def root_to_dict_study(root):
"""
Part of parse xml, item_type = Study
"""
info = {}
info['URN'] = root.find('.//URN').text
# Sweep Description
sweep = {}
sweep['title'] = root.find('.//Citation/Title/String').text
sweep['principal_investigator'] = root.find('.//Citation/Creator/CreatorName/String').text
if root.find('.//Citation/Publisher') is not None:
sweep['publisher'] = root.find('.//Citation/Publisher/PublisherName/String').text
else:
sweep['publisher'] = None
sweep['abstract'] = root.find('.//Abstract/Content').text
pop = {}
if root.find('.//UniverseReference') is not None:
pop['Agency'] = root.find('.//UniverseReference/Agency').text
pop['ID'] = root.find('.//UniverseReference/ID').text
pop['Version'] = root.find('.//UniverseReference/Version').text
pop['Type'] = root.find('.//UniverseReference/TypeOfObject').text
sweep['population'] = pop
# custom filed
CustomFields = root.findall('.//UserAttributePair/AttributeValue')
custom_list = []
for x, cus in enumerate(CustomFields):
# Convert a string representation of a dictionary to a dictionary
custom_list.append(json.loads(cus.text))
sweep['custom_field'] = custom_list
info['sweep'] = sweep
# Funding
funding = {}
organization = {}
organization['Agency'] = root.find('.//FundingInformation/AgencyOrganizationReference/Agency').text
organization['ID'] = root.find('.//FundingInformation/AgencyOrganizationReference/ID').text
organization['Version'] = root.find('.//FundingInformation/AgencyOrganizationReference/Version').text
organization['Type'] = root.find('.//FundingInformation/AgencyOrganizationReference/TypeOfObject').text
funding['organization'] = organization
info['funding'] = funding
# TODO: Coverage
coverages = root.findall('.//Coverage')
# Data
data = {}
k = root.find('.//KindOfData')
data['KindOfData'] = '-'.join(item.text for item in k)
data['Analysis Unit'] = root.find('.//AnalysisUnit').text
# data files
datafile = {}
if root.find('.//PhysicalInstanceReference') is not None:
datafile['Agency'] = root.find('.//PhysicalInstanceReference/Agency').text
datafile['ID'] = root.find('.//PhysicalInstanceReference/ID').text
datafile['Version'] = root.find('.//PhysicalInstanceReference/Version').text
datafile['Type'] = root.find('.//PhysicalInstanceReference/TypeOfObject').text
data['Data File'] = datafile
info['data'] = data
# data collection
datacol = {}
datacol['Agency'] = root.find('.//DataCollectionReference/Agency').text
datacol['ID'] = root.find('.//DataCollectionReference/ID').text
datacol['Version'] = root.find('.//DataCollectionReference/Version').text
datacol['Type'] = root.find('.//DataCollectionReference/TypeOfObject').text
info['Data Collection'] = datacol
# Extra
metadata = {}
if root.find('.//RequiredResourcePackages/ResourcePackageReference') is not None:
metadata['Agency'] = root.find('.//RequiredResourcePackages/ResourcePackageReference/Agency').text
metadata['ID'] = root.find('.//RequiredResourcePackages/ResourcePackageReference/ID').text
metadata['Version'] = root.find('.//RequiredResourcePackages/ResourcePackageReference/Version').text
metadata['Type'] = root.find('.//RequiredResourcePackages/ResourcePackageReference/TypeOfObject').text
info['Metadata Packages'] = metadata
return info
def root_to_dict_series(root):
"""
Part of parse xml, item_type = Series
"""
info = {}
info['URN'] = root.find('.//URN').text
# Study Description
study = {}
study['title'] = root.find('.//Citation/Title/String').text
study['principal_investigator'] = root.find('.//Citation/Creator/CreatorName/String').text
study['publisher'] = root.find('.//Citation/Publisher/PublisherName/String').text
study['rights'] = root.find('.//Citation/Copyright/String').text
study['abstract'] = root.find('.//Abstract/Content').text
pop = {}
pop['Agency'] = root.find('.//UniverseReference/Agency').text
pop['ID'] = root.find('.//UniverseReference/ID').text
pop['Version'] = root.find('.//UniverseReference/Version').text
pop['Type'] = root.find('.//UniverseReference/TypeOfObject').text
study['population'] = pop
info['study'] = study
# Funding
funding = {}
if not root.find('.//FundingInformation/GrantNumber') is None:
funding['GrantNumber'] = root.find('.//FundingInformation/GrantNumber').text
organization = {}
organization['Agency'] = root.find('.//FundingInformation/AgencyOrganizationReference/Agency').text
organization['ID'] = root.find('.//FundingInformation/AgencyOrganizationReference/ID').text
organization['Version'] = root.find('.//FundingInformation/AgencyOrganizationReference/Version').text
organization['Type'] = root.find('.//FundingInformation/AgencyOrganizationReference/TypeOfObject').text
funding['organization'] = organization
info['funding'] = funding
# Studies
studies = root.findall('.//StudyUnitReference')
study_list = []
for x, study in enumerate(studies):
study_dict={}
study_dict['position'] = x + 1
study_dict['Agency'] = study.find(".//Agency").text
study_dict['ID'] = study.find(".//ID").text
study_dict['Version'] = study.find(".//Version").text
study_dict['Type'] = study.find(".//TypeOfObject").text
study_list.append(study_dict)
info['study'] = study_list
return info
def root_to_dict_metadata_package(root):
"""
Part of parse xml, item_type = Data Collection
"""
info = dict([[i.attrib['typeOfUserID'].split(':')[-1], i.text] for i in root.findall('.//UserID')])
info['URN'] = root.find('.//URN').text
info['VersionResponsibility'] = root.find('.//VersionResponsibility').text
if not root.find('.//VersionRationale') is None:
info['VersionRationale'] = root.find('.//VersionRationale/RationaleDescription/String').text
info['Citation'] = root.find('.//Citation/Title/String').text
info['Purpose'] = root.find('.//Purpose/Content').text
# InterviewerInstructionSchemeReference
instruction_dict = {}
instruction_ref = root.find('.//InterviewerInstructionSchemeReference')
if not instruction_ref is None:
instruction_dict['Agency'] = instruction_ref.find(".//Agency").text
instruction_dict['ID'] = instruction_ref.find(".//ID").text
instruction_dict['Version'] = instruction_ref.find(".//Version").text
instruction_dict['Type'] = instruction_ref.find(".//TypeOfObject").text
info['InterviewerInstructionSchemeReference'] = instruction_dict
# ControlConstructSchemeReference
cc_dict = {}
cc_ref = root.find('.//ControlConstructSchemeReference')
    if cc_ref is not None:
cc_dict['Agency'] = cc_ref.find(".//Agency").text
cc_dict['ID'] = cc_ref.find(".//ID").text
cc_dict['Version'] = cc_ref.find(".//Version").text
cc_dict['Type'] = cc_ref.find(".//TypeOfObject").text
info['ControlConstructSchemeReference'] = cc_dict
# QuestionSchemeReference
question_all = root.findall('.//QuestionSchemeReference')
question_list = []
for question in question_all:
question_dict = {}
question_dict['Agency'] = question.find('.//Agency').text
question_dict['ID'] = question.find('.//ID').text
question_dict['Version'] = question.find('.//Version').text
question_dict['Type'] = question.find('.//TypeOfObject').text
question_list.append(question_dict)
info['QuestionSchemeReference'] = question_list
# CategorySchemeReference
category_dict = {}
category_ref = root.find('.//CategorySchemeReference')
    if category_ref is not None:
category_dict['Agency'] = category_ref.find(".//Agency").text
category_dict['ID'] = category_ref.find(".//ID").text
category_dict['Version'] = category_ref.find(".//Version").text
category_dict['Type'] = category_ref.find(".//TypeOfObject").text
info['CategorySchemeReference'] = category_dict
# CodeListSchemeReference
code_dict = {}
code_ref = root.find('.//CodeListSchemeReference')
    if code_ref is not None:
code_dict['Agency'] = code_ref.find(".//Agency").text
code_dict['ID'] = code_ref.find(".//ID").text
code_dict['Version'] = code_ref.find(".//Version").text
code_dict['Type'] = code_ref.find(".//TypeOfObject").text
info['CodeListSchemeReference'] = code_dict
# InstrumentSchemeReference
instrument_dict = {}
instrument_ref = root.find('.//InstrumentSchemeReference')
    if instrument_ref is not None:
instrument_dict['Agency'] = instrument_ref.find(".//Agency").text
instrument_dict['ID'] = instrument_ref.find(".//ID").text
instrument_dict['Version'] = instrument_ref.find(".//Version").text
instrument_dict['Type'] = instrument_ref.find(".//TypeOfObject").text
info['InstrumentSchemeReference'] = instrument_dict
return info
def root_to_dict_data_collection(root):
"""
Part of parse xml, item_type = Data Collection
"""
info = {}
print(info)
info['URN'] = root.find('.//URN').text
info['Name'] = root.find('.//DataCollectionModuleName/String').text
info['Label'] = root.find('.//Label/Content').text
# InstrumentRef
cus_dict = {}
cus = root.findall('.//UserAttributePair')
for item in cus:
k = item.find('.//AttributeKey').text.split(':')[-1]
v = item.find('.//AttributeValue').text.replace('[','').replace(']','').replace('"', '').replace("'","")
cus_dict[k] = v
info['Ref'] = cus_dict
# CollectionEvent
event = {}
event['URN'] = root.find('.//CollectionEvent/URN').text
event['Agency'] = root.find('.//CollectionEvent/Agency').text
event['ID'] = root.find('.//CollectionEvent/ID').text
event['Version'] = root.find('.//CollectionEvent/Version').text
# Organization Reference
organization_list = []
organization_all = root.findall('.//CollectionEvent/DataCollectorOrganizationReference')
for organization in organization_all:
OrganizationRef = {}
OrganizationRef['Agency'] = organization.find('.//Agency').text
OrganizationRef['ID'] = organization.find('.//ID').text
OrganizationRef['Version'] = organization.find('.//Version').text
        OrganizationRef['Type'] = organization.find('.//TypeOfObject').text
        organization_list.append(OrganizationRef)
    event['OrganizationRef'] = organization_list
# Data Collection Date
DCDate = {}
date = root.find('.//CollectionEvent/DataCollectionDate')
if date.find('.//StartDate') is not None:
DCDate['StartDate'] = date.find('.//StartDate').text
elif date.find('.//EndDate') is not None:
DCDate['EndDate'] = date.find('.//EndDate').text
elif date.find('.//SimpleDate') is not None:
DCDate['SimpleDate'] = date.find('.//SimpleDate').text
event['Date'] = DCDate
# Mode Of Collection
mode_list = []
mode_all = root.findall('.//CollectionEvent/ModeOfCollection')
# list multiple types
for type_mode in mode_all:
mode_dict = {}
mode_dict['URN'] = type_mode.find('./URN').text
mode_dict['Agency'] = type_mode.find('./Agency').text
mode_dict['ID'] = type_mode.find('./ID').text
mode_dict['Version'] = type_mode.find('./Version').text
mode_dict['TypeOfMode'] = type_mode.find('./TypeOfModeOfCollection').text
mode_dict['Description'] = type_mode.find('./Description/Content').text
mode_list.append(mode_dict)
event['ModeOfCollection'] = mode_list
info['CollectionEvent'] = event
# Question Scheme Reference
QSR = {}
if root.find('.//QuestionSchemeReference') is not None:
QSR['Agency'] = root.find('.//QuestionSchemeReference/Agency').text
QSR['ID'] = root.find('.//QuestionSchemeReference/ID').text
QSR['Version'] = root.find('.//QuestionSchemeReference/Version').text
QSR['TypeOfObject'] = root.find('.//QuestionSchemeReference/TypeOfObject').text
info['reference'] = QSR
return info
def root_to_dict_sequence(root):
"""
Part of parse xml, item_type = Sequence
"""
info = {}
info['URN'] = root.find('.//URN').text
info['SourceId'] = root.find('.//UserID').text
if root.find('.//ConstructName/String') is not None:
info['ConstructName'] = root.find('.//ConstructName/String').text
else:
info['ConstructName'] = None
if root.find('.//Label/Content') is not None:
info['Label'] = root.find('.//Label/Content').text
else:
info['Label'] = None
references = root.findall(".//ControlConstructReference")
ref_list = []
for x, ref in enumerate(references):
ref_dict={}
ref_dict['position'] = x + 1
ref_dict['Agency'] = ref.find(".//Agency").text
ref_dict['ID'] = ref.find(".//ID").text
ref_dict['Version'] = ref.find(".//Version").text
ref_dict['Type'] = ref.find(".//TypeOfObject").text
ref_list.append(ref_dict)
info['references'] = ref_list
return info
def root_to_dict_statement(root):
"""
Part of parse xml, item_type = Statement
"""
info = {}
info['StatementURN'] = root.find('.//URN').text
info['SourceId'] = root.find('.//UserID').text
instruction = root.find(".//UserAttributePair/AttributeValue").text
if instruction == '{}':
info['Instruction'] = ''
else:
info['Instruction'] = instruction
info['Label'] = root.find(".//ConstructName/String").text
info['Literal'] = root.find(".//DisplayText/LiteralText/Text").text
return info
def root_to_dict_organization(root):
"""
Part of parse xml, item_type = Organization
"""
info = {}
info['URN'] = root.find('.//URN').text
# Nickname
cus_dict = {}
cus = root.findall('.//UserAttributePair')
for item in cus:
k = item.find('.//AttributeKey').text.split(':')[-1]
v = item.find('.//AttributeValue').text
cus_dict[k] = v
info['cust'] = cus_dict
info['Name'] = root.find('.//OrganizationIdentification/OrganizationName/String').text
info['Image'] = root.find('.//OrganizationIdentification/OrganizationImage/ImageLocation').text
info['Description'] = root.find('.//Description/Content').text
return info
def root_to_dict_instrument(root):
"""
Part of parse xml, item_type = Instrument
"""
info = {}
info['InstrumentURN'] = root.find('.//URN').text
if root.findall(".//*[@typeOfUserID='colectica:sourceId']") != []:
info['InstrumentSourceID'] = root.findall(".//*[@typeOfUserID='colectica:sourceId']")[0].text
if root.findall(".//*[@typeOfUserID='closer:sourceFileName']") != []:
info['InstrumentLabel'] = root.findall(".//*[@typeOfUserID='closer:sourceFileName']")[0].text
info['InstrumentName'] = root.find(".//InstrumentName/String").text
if not root.find(".//ExternalInstrumentLocation") is None:
info['ExternalInstrumentLocation'] = root.find(".//ExternalInstrumentLocation").text
else:
info['ExternalInstrumentLocation'] = None
references = root.findall(".//ControlConstructReference")
ref_list = []
for x, ref in enumerate(references):
ref_dict={}
ref_dict['position'] = x + 1
ref_dict['Agency'] = ref.find(".//Agency").text
ref_dict['ID'] = ref.find(".//ID").text
ref_dict['Version'] = ref.find(".//Version").text
ref_dict['Type'] = ref.find(".//TypeOfObject").text
ref_list.append(ref_dict)
info['references'] = ref_list
return info
def root_to_dict_question_group(root):
"""
Part of parse xml, item_type = Question Group
"""
info = {}
info['URN'] = root.find('.//URN').text
if root.find('.//QuestionGroupName/String') is not None:
info['Name'] = root.find('.//QuestionGroupName/String').text
else:
info['Name'] = None
if root.find('.//Label/Content').text is not None:
info['Label'] = root.find('.//Label/Content').text
else:
info['Label'] = None
# Concept Reference
ConceptRef = {}
if root.find('.//ConceptReference') is not None:
ConceptRef['Agency'] = root.find('.//ConceptReference/Agency').text
ConceptRef['ID'] = root.find('.//ConceptReference/ID').text
ConceptRef['Version'] = root.find('.//ConceptReference/Version').text
ConceptRef['Type'] = root.find('.//ConceptReference/TypeOfObject').text
info['ConceptRef'] = ConceptRef
# Question Item Reference
QuestionItemRef = root.findall(".//QuestionItemReference")
QIref_list = []
for x, ref in enumerate(QuestionItemRef):
ref_dict={}
ref_dict['position'] = x + 1
ref_dict['Agency'] = ref.find(".//Agency").text
ref_dict['ID'] = ref.find(".//ID").text
ref_dict['Version'] = ref.find(".//Version").text
ref_dict['Type'] = ref.find(".//TypeOfObject").text
QIref_list.append(ref_dict)
info['QuestionItemRef'] = QIref_list
# Question Group Reference
QuestionGroupRef = root.findall(".//QuestionGroupReference")
QGref_list = []
for x, ref in enumerate(QuestionGroupRef):
ref_dict={}
ref_dict['position'] = x + 1
ref_dict['Agency'] = ref.find(".//Agency").text
ref_dict['ID'] = ref.find(".//ID").text
ref_dict['Version'] = ref.find(".//Version").text
ref_dict['Type'] = ref.find(".//TypeOfObject").text
QGref_list.append(ref_dict)
info['QuestionGroupRef'] = QGref_list
return info
def root_to_dict_concept(root):
"""
Part of parse xml, item_type = Concept
"""
info = {}
info['URN'] = root.find('.//URN').text
info['VersionResponsibility'] = root.find('.//VersionResponsibility').text
info['VersionRationale'] = root.find('.//VersionRationale/RationaleDescription/String').text
info['Name'] = root.find('.//ConceptName/String').text
info['Label'] = root.find('.//Label/Content').text
return info
def root_to_dict_interviewer_instruction(root):
"""
Part of parse xml, item_type = Interviewer Instruction
"""
info = {}
info['InstructionURN'] = root.find('.//URN').text
info['UserID'] = root.find('.//UserID').text
info['InstructionText'] = root.find('.//InstructionText/LiteralText/Text').text
return info
def root_to_dict_question(root):
"""
Part of parse xml, item_type = Question
"""
info = {}
info['QuestionURN'] = root.find('.//URN').text
info['QuestionUserID'] = root.find('.//UserID').text
QLabel = root.find('.//UserAttributePair/AttributeValue').text
info['QuestionLabel'] = list(eval(QLabel).values())[0]
info['QuestionItemName'] = root.find(".//QuestionItemName/String").text
if root.find(".//QuestionText/LiteralText/Text") is not None:
info['QuestionLiteral'] = root.find(".//QuestionText/LiteralText/Text").text
else:
info['QuestionLiteral'] = None
# ResponseCardinality
cardinality = root.find('.//ResponseCardinality')
car_dict = {}
car_dict['minimumResponses'] = cardinality.attrib['minimumResponses']
car_dict['maximumResponses'] = cardinality.attrib['maximumResponses']
info['ResponseCardinality'] = car_dict
# response
response = {}
CodeDomain = root.find(".//CodeDomain")
TextDomain = root.find(".//TextDomain")
NumericDomain = root.find(".//NumericDomain")
DateTimeDomain = root.find(".//DateTimeDomain")
if CodeDomain is not None:
response['response_type'] = 'CodeList'
response['CodeList_Agency'] = root.find('.//CodeListReference/Agency').text
response['CodeList_ID'] = CodeDomain.find(".//CodeListReference/ID").text
response['CodeList_version'] = CodeDomain.find(".//CodeListReference/Version").text
response['code_list_URN'] = (':').join(['urn:ddi', response['CodeList_Agency'], response['CodeList_ID'], response['CodeList_version']])
elif TextDomain is not None:
response['response_type'] = 'Text'
response['response_label'] = TextDomain.find(".//Label/Content").text
elif NumericDomain is not None:
response['response_type'] = 'Numeric'
response['response_label'] = root.find(".//Label").text
response['response_NumericType'] = root.find(".//NumericTypeCode").text
if root.find(".//NumberRange/Low") is not None:
response['response_RangeLow'] = root.find(".//NumberRange/Low").text
else:
response['response_RangeLow'] = None
if root.find(".//NumberRange/High") is not None:
response['response_RangeHigh'] = root.find(".//NumberRange/High").text
else:
response['response_RangeHigh'] = None
elif DateTimeDomain is not None:
response['response_type'] = 'DateTime'
response['DateTypeCode'] = DateTimeDomain.find(".//DateTypeCode").text
response['response_label'] = DateTimeDomain.find(".//Label/Content").text
info['Response'] = response
# InterviewerInstructionReference
inst_dict = {}
InstructionRef = root.find(".//InterviewerInstructionReference")
if InstructionRef is not None:
inst_dict['Agency'] = InstructionRef.find(".//Agency").text
inst_dict['ID'] = InstructionRef.find(".//ID").text
inst_dict['Version'] = InstructionRef.find(".//Version").text
inst_dict['Type'] = InstructionRef.find(".//TypeOfObject").text
info['Instruction'] = inst_dict
return info
def root_to_dict_question_grid(root):
"""
Part of parse xml, item_type = Question Grid
"""
info = {}
info['QuestionGridURN'] = root.find('.//URN').text
info['QuestionGridUserID'] = root.find('.//UserID').text
info['QuestionGridLabel'] = root.find(".//UserAttributePair/AttributeValue").text
info['QuestionGridName'] = root.find('.//QuestionGridName/String').text
info['QuestionGridLiteral'] = root.find('.//QuestionText/LiteralText/Text').text
# GridDimension
GridDimension = root.findall('.//GridDimension')
grid_dimension_list = []
for x, dim in enumerate(GridDimension):
dim_dict={}
dim_dict['rank'] = dim.attrib['rank']
ResponseCardinality = dim.find('.//CodeDomain/ResponseCardinality')
dim_dict['minimumResponses'] = ResponseCardinality.attrib['minimumResponses']
dim_dict['maximumResponses'] = ResponseCardinality.attrib['maximumResponses']
code_ref_dict = {}
code_ref = dim.find('.//CodeDomain/CodeListReference')
if not code_ref is None:
code_ref_dict['Agency'] = code_ref.find('.//Agency').text
code_ref_dict['ID'] = code_ref.find('.//ID').text
code_ref_dict['Version'] = code_ref.find('.//Version').text
code_ref_dict['TypeOfObject'] = code_ref.find('.//TypeOfObject').text
dim_dict['CodeListReference'] = code_ref_dict
grid_dimension_list.append(dim_dict)
info['GridDimension'] = grid_dimension_list
num_domain_dict = {}
NumericDomain = root.find('.//NumericDomain')
if not NumericDomain is None:
num_domain_dict['NumericTypeCode'] = root.find(".//NumericTypeCode").text
num_domain_dict['Content'] = root.find(".//Label/Content").text
if NumericDomain.find(".//NumberRange/Low") is not None:
num_domain_dict['NumberRangeLow'] = NumericDomain.find(".//NumberRange/Low").text
else:
num_domain_dict['NumberRangeLow'] = None
if NumericDomain.find(".//NumberRange/High") is not None:
num_domain_dict['NumberRangeHigh'] = NumericDomain.find(".//NumberRange/High").text
else:
num_domain_dict['NumberRangeHigh'] = None
info['NumericDomain'] = num_domain_dict
return info
def root_to_dict_code_set(root):
"""
Part of parse xml, item_type = Code Set
"""
info = {}
info['URN'] = root.find('.//URN').text
info['UserID'] = root.find('.//UserID').text
info['Label'] = root.find('.//Label/Content').text
# Codes
codes = root.findall('.//Code')
code_list = []
for x, code in enumerate(codes):
code_dict={}
code_dict['URN'] = code.find('.//URN').text
code_dict['Agency'] = code.find('.//Agency').text
code_dict['ID'] = code.find('.//ID').text
code_dict['Version'] = code.find('.//Version').text
code_dict['Value'] = code.find('.//Value').text
cat_ref_dict = {}
cat = code.find('CategoryReference')
if not cat is None:
cat_ref_dict['Agency'] = cat.find('.//Agency').text
cat_ref_dict['ID'] = cat.find('.//ID').text
cat_ref_dict['Version'] = cat.find('.//Version').text
cat_ref_dict['TypeOfObject'] = cat.find('.//TypeOfObject').text
code_dict['CategoryReference'] = cat_ref_dict
code_list.append(code_dict)
info['Code'] = code_list
return info
def root_to_dict_category(root):
"""
Part of parse xml, item_type = Category
"""
info = {}
info['URN'] = root.find('.//URN').text
info['UserID'] = root.find('.//UserID').text
info['Name'] = root.find('.//CategoryName/String').text
if not root.find('.//Label/Content') is None:
info['Label'] = root.find('.//Label/Content').text
else:
info['Label'] = None
return info
def root_to_dict_question_activity(root):
"""
Part of parse xml, item_type = Question Activity
"""
info = {}
info['URN'] = root.find('.//QuestionConstruct/URN').text
info['UserID'] = root.find('.//QuestionConstruct/UserID').text
info['ConstructName'] = root.find('.//QuestionConstruct/ConstructName/String').text
info['Label'] = root.find('.//QuestionConstruct/Label/Content').text
info['ResponseUnit'] = root.find('.//QuestionConstruct/ResponseUnit').text
# QuestionReference
QuestionReference = root.find('.//QuestionConstruct/QuestionReference')
question_ref_dict = {}
if not QuestionReference is None:
question_ref_dict['Agency'] = QuestionReference.find('.//Agency').text
question_ref_dict['ID'] = QuestionReference.find('.//ID').text
question_ref_dict['Version'] = QuestionReference.find('.//Version').text
question_ref_dict['TypeOfObject'] = QuestionReference.find('.//TypeOfObject').text
info['QuestionReference'] = question_ref_dict
return info
def root_to_dict_variable(root):
"""
Part of parse xml, item_type = Variable
"""
info = {}
info['URN'] = root.find('.//Variable/URN').text
info['UserID'] = root.find('.//Variable/UserID').text
info['VariableName'] = root.find('.//Variable/VariableName/String').text
info['Label'] = root.find('.//Variable/Label/Content').text
# QuestionReference
QuestionReference = root.find('.//Variable/QuestionReference')
question_ref_dict = {}
if not QuestionReference is None:
question_ref_dict['Agency'] = QuestionReference.find('.//Agency').text
question_ref_dict['ID'] = QuestionReference.find('.//ID').text
question_ref_dict['Version'] = QuestionReference.find('.//Version').text
question_ref_dict['TypeOfObject'] = QuestionReference.find('.//TypeOfObject').text
info['QuestionReference'] = question_ref_dict
# VariableRepresentation/CodeRepresentation
CodeRepresentation = root.find('.//Variable/VariableRepresentation/CodeRepresentation')
code_rep_dict = {}
if not CodeRepresentation is None:
code_rep_dict['RecommendedDataType'] = CodeRepresentation.find('.//RecommendedDataType').text
# CodeListReference
code_ref = {}
CodeListReference = CodeRepresentation.find('.//CodeListReference')
if not CodeListReference is None:
code_ref['ID'] = CodeListReference.find('.//ID').text
code_ref['Version'] = CodeListReference.find('.//Version').text
code_ref['TypeOfObject'] = CodeListReference.find('.//TypeOfObject').text
code_rep_dict['CodeListReference'] = code_ref
info['CodeRepresentation'] = code_rep_dict
return info
def root_to_dict_conditional(root):
"""
Part of parse xml, item_type = Conditional
"""
info = {}
info['URN'] = root.find('.//IfThenElse/URN').text
info['UserID'] = root.find('.//IfThenElse/UserID').text
info['ConstructName'] = root.find('.//IfThenElse/ConstructName/String').text
IfCondition = root.find('.//IfThenElse/IfCondition')
ifcondition_dict = {}
if not IfCondition is None:
ifcondition_dict['Description'] = IfCondition.find('.//Description/Content').text
ifcondition_dict['ProgramLanguage'] = IfCondition.find('.//Command/ProgramLanguage').text
ifcondition_dict['CommandContent'] = IfCondition.find('.//Command/CommandContent').text
info['IfCondition'] = ifcondition_dict
# ThenConstructReference
ThenReference = root.findall('.//ThenConstructReference')
then_list = []
for x, ThenRef in enumerate(ThenReference):
then_ref_dict = {}
then_ref_dict['Agency'] = ThenRef.find('.//Agency').text
then_ref_dict['ID'] = ThenRef.find('.//ID').text
then_ref_dict['Version'] = ThenRef.find('.//Version').text
then_ref_dict['TypeOfObject'] = ThenRef.find('.//TypeOfObject').text
then_list.append(then_ref_dict)
info['IfThenReference'] = then_list
return info
def root_to_dict_loop(root):
"""
Part of parse xml, item_type = Loop
"""
info = {}
info['URN'] = root.find('.//Loop/URN').text
info['UserID'] = root.find('.//Loop/UserID').text
info['ConstructName'] = root.find('.//Loop/ConstructName/String').text
InitialValue = root.find('.//Loop/InitialValue')
InitialValue_dict = {}
if not InitialValue is None:
InitialValue_dict['ProgramLanguage'] = InitialValue.find('.//Command/ProgramLanguage').text
InitialValue_dict['CommandContent'] = InitialValue.find('.//Command/CommandContent').text
info['InitialValue'] = InitialValue_dict
LoopWhile = root.find('.//Loop/LoopWhile')
LoopWhile_dict = {}
if not LoopWhile is None:
LoopWhile_dict['ProgramLanguage'] = LoopWhile.find('.//Command/ProgramLanguage').text
LoopWhile_dict['CommandContent'] = LoopWhile.find('.//Command/CommandContent').text
info['LoopWhile'] = LoopWhile_dict
#TODO StepValue
StepValue = root.find('.//Loop/StepValue')
StepValue_dict = {}
if not StepValue is None:
print('TODO StepValue')
# ControlConstructReference
CCReference = root.find('.//Loop/ControlConstructReference')
cc_ref_dict = {}
if not CCReference is None:
cc_ref_dict['Agency'] = CCReference.find('.//Agency').text
cc_ref_dict['ID'] = CCReference.find('.//ID').text
cc_ref_dict['Version'] = CCReference.find('.//Version').text
cc_ref_dict['TypeOfObject'] = CCReference.find('.//TypeOfObject').text
info['ControlConstructReference'] = cc_ref_dict
return info
def parse_xml(xml, item_type):
"""
Used for parsing Item value
item_type in:
- Series
- Study
- Metadata Package
- Data Collection
- Sequence
- Statement
- Organization
- Instrument
- Question Group
- Concept
- Question
- Question Grid
- Code Set
- Interviewer Instruction
- Category
- Question Activity
- Variable
- Conditional
- Loop
"""
root = remove_xml_ns(xml)
if item_type == 'Series':
info = root_to_dict_series(root)
elif item_type == 'Study':
info = root_to_dict_study(root)
elif item_type == 'Metadata Package':
info = root_to_dict_metadata_package(root)
elif item_type == 'Data Collection':
info = root_to_dict_data_collection(root)
elif item_type == 'Sequence':
info = root_to_dict_sequence(root)
elif item_type == 'Statement':
info = root_to_dict_statement(root)
elif item_type == 'Organization':
info = root_to_dict_organization(root)
elif item_type == 'Instrument':
info = root_to_dict_instrument(root)
elif item_type == 'Question Group':
info = root_to_dict_question_group(root)
elif item_type == 'Concept':
info = root_to_dict_concept(root)
elif item_type == 'Question':
info = root_to_dict_question(root)
elif item_type == 'Question Grid':
info = root_to_dict_question_grid(root)
elif item_type == 'Code Set':
info = root_to_dict_code_set(root)
elif item_type == 'Interviewer Instruction':
info = root_to_dict_interviewer_instruction(root)
elif item_type == 'Category':
info = root_to_dict_category(root)
elif item_type == 'Question Activity':
info = root_to_dict_question_activity(root)
elif item_type == 'Variable':
info = root_to_dict_variable(root)
elif item_type == 'Conditional':
info = root_to_dict_conditional(root)
elif item_type == 'Loop':
info = root_to_dict_loop(root)
else:
info = {}
return info
class ColecticaObject(api.ColecticaLowLevelAPI):
"""Ask practical questions to Colectica."""
def item_to_dict(self, AgencyId, Identifier):
"""
From an agency ID and an identifier, get information using get_an_item
Return a dictionary
"""
result = self.get_an_item(AgencyId, Identifier)
info = {}
item_info = None
if not result is None:
for k, v in result.items():
if k == 'ItemType':
info[k] = self.item_code_inv(v)
elif k == 'Item':
item_info = parse_xml(v, self.item_code_inv(result['ItemType']))
else:
info[k] = v
d = {**info, **item_info}
else:
d = {}
return d
def get_a_set_to_df(self, AgencyId, Identifier, Version):
"""
From a study, find all questions
Example:
'ItemType': 'f196cc07-9c99-4725-ad55-5b34f479cf7d', (Instrument)
'AgencyId': 'uk.cls.nextsteps',
'Version': 1,
'Identifier': 'a6f96245-5c00-4ad3-89e9-79afaefa0c28'
"""
l = self.get_a_set_typed(AgencyId, Identifier, Version)
# print(l)
df = pd.DataFrame(
[self.item_code_inv(l[i]["Item2"]), l[i]["Item1"]["Item1"]] for i in range(len(l))
)
df.columns = ["ItemType", "Identifier"]
return df
def item_info_set(self, AgencyId, Identifier):
"""
        From an ID, find its name and set
"""
info = self.item_to_dict(AgencyId, Identifier)
df = self.get_a_set_to_df(AgencyId, Identifier, str(info['Version']))
return df, info
def get_question_group_info(self, AgencyId, Identifier):
"""
From a question identifier, get information about it
"""
question_group = self.get_an_item(AgencyId, Identifier)
root = remove_xml_ns(question_group["Item"])
question = {}
        for k, v in question_group.items():
if k == 'ItemType':
question[k] = self.item_code_inv(v)
elif k == 'Item':
question['UserID'] = root.find(".//UserID").text
question['QuestionLabel'] = root.find(".//UserAttributePair/AttributeValue").text
question['QuestionItemName'] = root.find(".//QuestionItemName/String").text
question['QuestionLiteral'] = root.find(".//QuestionText/LiteralText/Text").text
else:
question[k] = v
return question
def get_question_all(self, AgencyId, Identifier):
"""
        From a question ID, return question info and its response
"""
# print(AgencyId, Identifier)
question_info = self.item_to_dict(AgencyId, Identifier)
# print(question_info)
if question_info['Response']== {}:
QI_response_type = None
else:
QI_response_type = question_info['Response']['response_type']
question_data = [ [ question_info['QuestionURN'],
question_info['QuestionUserID'],
question_info['QuestionLabel'],
question_info['QuestionItemName'],
question_info['QuestionLiteral'],
QI_response_type ] ]
df_question = pd.DataFrame(question_data,
columns=['QuestionURN', 'QuestionUserID', 'QuestionLabel', 'QuestionItemName',
'QuestionLiteral', 'response_type'])
# instruction
if question_info['Instruction'] != {}:
instruction_dict = self.item_to_dict(question_info['Instruction']['Agency'], question_info['Instruction']['ID'])
df_question['Instruction_URN'] = instruction_dict['InstructionURN']
df_question['Instruction'] = instruction_dict['InstructionText']
else:
df_question['Instruction_URN'] = None
df_question['Instruction'] = None
if QI_response_type == 'CodeList':
code_result = self.item_to_dict(AgencyId, question_info['Response']['CodeList_ID'])
code_list_sourceId = code_result['UserID']
code_list_label = code_result['Label']
code_list = code_result['Code']
df = | pd.DataFrame(columns=['response_type', 'Value', 'Name', 'ID', 'Label']) | pandas.DataFrame |
import json
import os
import sqlite3
import pyAesCrypt
import pandas
from os import stat
from datetime import datetime
import time
import numpy
# Global variables for use by this file
bufferSize = 64*1024
password = os.environ.get('ENCRYPTIONPASSWORD')
# py -c 'import databaseAccess; databaseAccess.reset()'
def reset():
resetActivities()
resetSplits()
# py -c 'import databaseAccess; databaseAccess.resetActivities()'
def resetActivities():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS activities;')
conn.commit()
conn.close()
encryptDatabase()
# py -c 'import databaseAccess; databaseAccess.resetSplits()'
def resetSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS splits;')
conn.commit()
conn.close()
encryptDatabase()
def getLastDate():
decryptDatabase()
lastActivityDate = '1970-01-01T00:00:00Z'
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='activities';")
result = cur.fetchone()
if result is not None:
# There is data, so let's grab the max datetime
cur.execute("SELECT MAX(start_date_local) FROM activities;")
result = cur.fetchone()
if result is not None:
# Found a max date
lastActivityDate, = result
conn.commit()
conn.close()
encryptDatabase()
return lastActivityDate
def setConfig(strava_tokens):
decryptDatabase()
    print("Let's put the tokens into the database")
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS config;')
cur.execute('CREATE TABLE config (token_type VARCHAR, access_token VARCHAR, expires_at BIGINT, expires_in INT, refresh_token VARCHAR);')
cur.execute('INSERT INTO config (token_type, access_token, expires_at, expires_in, refresh_token) values (?, ?, ?, ?, ?);', (strava_tokens['token_type'], strava_tokens['access_token'], strava_tokens['expires_at'], strava_tokens['expires_in'], strava_tokens['refresh_token']))
conn.commit()
conn.close()
encryptDatabase()
def getConfig():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('SELECT * FROM config')
rows = cur.fetchall()
conn.commit()
conn.close()
encryptDatabase()
return json.loads(json.dumps( [dict(ix) for ix in rows] ))[0]
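# Hedged usage sketch (not part of the original module): round-trips a Strava token
# payload through the encrypted database; the token values below are made up.
def exampleConfigRoundTrip():
    exampleTokens = {
        'token_type': 'Bearer',
        'access_token': 'example-access-token',
        'expires_at': 1700000000,
        'expires_in': 21600,
        'refresh_token': 'example-refresh-token',
    }
    setConfig(exampleTokens)   # drops and recreates the config table, then stores the tokens
    stored = getConfig()       # reads them back as a dict
    assert stored['access_token'] == exampleTokens['access_token']
    return stored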
# Must be called to access the database, otherwise it can't be read
# py -c 'import databaseAccess; databaseAccess.decryptDatabase()'
def decryptDatabase():
if os.path.exists('strava_temp.sqlite'):
print('Database already decrypted! Skipping. . .')
else:
if os.path.exists('strava.sqlite'):
encFileSize = stat('strava.sqlite').st_size
with open('strava.sqlite', 'rb') as fIn:
with open('strava_temp.sqlite', 'wb') as fOut:
pyAesCrypt.decryptStream(fIn, fOut, password, bufferSize, encFileSize)
else:
print('Unable to find database to decrypt! Skipping. . .')
# Always call this after you touch the database to re-encrypt it
def encryptDatabase():
if os.path.exists('strava_temp.sqlite'):
if os.path.exists('strava.sqlite'):
os.remove('strava.sqlite')
with open('strava_temp.sqlite', 'rb') as fIn:
with open('strava.sqlite', 'wb') as fOut:
pyAesCrypt.encryptStream(fIn, fOut, password, bufferSize)
if os.path.exists('strava_temp.sqlite'):
os.remove('strava_temp.sqlite')
else:
print('Unable to find database to encrypt, skipping...')
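# Hedged usage sketch (not part of the original module): every helper in this file
# follows the same decrypt -> use connection -> re-encrypt pattern. This is the minimal
# read-only version; it assumes the activities table has already been created by setActvities.
def exampleCountActivities():
    decryptDatabase()
    conn = sqlite3.connect('strava_temp.sqlite')
    cur = conn.cursor()
    cur.execute('SELECT COUNT(*) FROM activities;')
    (count,) = cur.fetchone()
    conn.close()
    encryptDatabase()
    return count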
def setActvities(activities):
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS activities (id BIGINT, name NVARCHAR, upload_id BIGINT, type VARCHAR, distance NUMERIC, moving_time INT, average_speed NUMERIC, max_speed NUMERIC, total_elevation_gain NUMERIC, start_date_local DATETIME, average_cadence NUMERIC, average_watts NUMERIC, average_heartrate NUMERIC, UNIQUE(id));')
conn.commit()
for _, currentActivity in activities.iterrows():
acitivityName = currentActivity['name']
activityId = currentActivity['id']
print(f'Insert activity id [{activityId}], [{acitivityName}] to database')
cur.execute('INSERT OR IGNORE INTO activities (id, name, upload_id, type, distance, moving_time, average_speed, max_speed, total_elevation_gain, start_date_local, average_cadence, average_watts, average_heartrate) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);', (activityId, acitivityName, currentActivity['upload_id'], currentActivity['type'], currentActivity['distance'], currentActivity['moving_time'], currentActivity['average_speed'], currentActivity['max_speed'], currentActivity['total_elevation_gain'], currentActivity['start_date_local'], currentActivity['average_cadence'], currentActivity['average_watts'], currentActivity['average_heartrate']))
conn.commit()
print(f'[{acitivityName}] done. . .')
conn.close()
encryptDatabase()
def setSplits(splits):
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS splits (split_id INT, activity_id BIGINT, activity_date DATETIME, average_speed NUMERIC, distance NUMERIC, elapsed_time INT, elevation_difference NUMERIC, moving_time INT, pace_zone INT, split INT, average_grade_adjusted_speed NUMERIC, average_heartrate NUMERIC, UNIQUE(split_id, activity_id));')
conn.commit()
for index, row in splits.iterrows():
cur.execute('INSERT OR IGNORE INTO splits (split_id, activity_id, activity_date, average_speed, distance, elapsed_time, elevation_difference, moving_time, pace_zone, split, average_grade_adjusted_speed, average_heartrate) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);', (index, row['id'], row['date'], row['average_speed'], row['distance'], row['elapsed_time'], row['elevation_difference'], row['moving_time'], row['pace_zone'], row['split'], row['average_grade_adjusted_speed'], row['average_heartrate']))
conn.commit()
conn.close()
encryptDatabase()
def getActvitiesMissingSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
storedActivities = pandas.DataFrame()
if result is not None:
storedActivities = pandas.read_sql_query('SELECT * FROM activities WHERE id NOT IN (SELECT activity_id FROM splits)', conn)
else:
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
if result is not None:
storedActivities = pandas.read_sql_query('SELECT * FROM activities', conn)
conn.commit()
conn.close()
encryptDatabase()
return storedActivities
def deleteActvitiesMissingSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
if result is not None:
cur = conn.cursor()
cur.execute('DELETE FROM activities WHERE id NOT IN (SELECT activity_id FROM splits)')
conn.commit()
conn.close()
encryptDatabase()
def getSplits():
decryptDatabase()
conn = sqlite3.connect('strava_temp.sqlite')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='splits';")
result = cur.fetchone()
storedSplits = | pandas.DataFrame() | pandas.DataFrame |
import numpy as np
import scipy.stats as sp
import os
import pandas as pd
import h5py
import bokeh.io as bkio
import bokeh.layouts as blay
import bokeh.models as bmod
import bokeh.plotting as bplt
from bokeh.palettes import Category20 as palette
from bokeh.palettes import Category20b as paletteb
import plot_results as plt_res
import frequency_analysis as fan
colrs = palette[20] + paletteb[20] + palette[20] + paletteb[20]
def save_data_to_hdf5(data_folder_path, hdf5_file_path):
d_paths = [f_file for f_file in os.listdir(data_folder_path) if f_file.endswith('axgt')]
with pd.HDFStore(hdf5_file_path) as h5file:
for d_path in d_paths:
f_path = os.path.join(data_folder_path, d_path)
d_arr = np.loadtxt(f_path, dtype={'names': ('time', 'Potential', 'Im', 'ACh'),
'formats': ('float', 'float', 'float', 'float')},
skiprows=1)
d_df = pd.DataFrame(d_arr)
d_name = d_path.split('.')[0].replace(' ', '_').replace('(', '').replace(')', '')
print(d_name)
h5file.put('{}/data'.format(d_name), d_df, format='table', data_columns=True)
print(h5file)
def plot_spike_data(in_h5_file, exclude_list=[]):
sp_fig = bplt.figure(title='Membrane Potential vs Time')
sp_fig.xaxis.axis_label = 'time (sec)'
sp_fig.yaxis.axis_label = 'potential (mV)'
print('Plotting Potential values from {}'.format(in_h5_file))
my_lines = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'data' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
my_lines.append(sp_fig.line(h5_data[f_name]['time'], 1000.0*h5_data[f_name]['Potential'],
line_width=3, color=colrs[f_i])
)
legend_items.append((leg_name, [my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
sp_fig.add_layout(my_legend, 'right')
return sp_fig
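# Hedged usage sketch (not part of the original module): axgt_folder and the HDF5 path
# are placeholders, and the .axgt file names are assumed to contain 'CA1' the way the
# plotting helpers expect; it builds the store once and shows the potential figure.
def example_build_and_plot(axgt_folder, h5_path='spike_data.h5'):
    save_data_to_hdf5(axgt_folder, h5_path)
    potential_fig = plot_spike_data(h5_path)
    bkio.show(potential_fig)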
def plot_selected_ach_data(in_h5_file, select_list):
sp_fig = bplt.figure(title='Acetylcholine vs Time')
sp_fig.xaxis.axis_label = 'time (sec)'
    sp_fig.yaxis.axis_label = 'ACh'
    print('Plotting ACh values from {}'.format(in_h5_file))
my_lines = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'data' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name in select_list:
my_lines.append(sp_fig.line(h5_data[f_name]['time'], h5_data[f_name]['ACh'],
line_width=1, color=colrs[f_i])
)
legend_items.append((leg_name, [my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
sp_fig.add_layout(my_legend, 'right')
return sp_fig
def plot_selected_spike_data(in_h5_file, select_list):
sp_fig = bplt.figure(title='Membrane Potential vs Time')
sp_fig.xaxis.axis_label = 'time (sec)'
sp_fig.yaxis.axis_label = 'potential (mV)'
print('Plotting Potential values from {}'.format(in_h5_file))
my_lines = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'data' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name in select_list:
my_lines.append(sp_fig.line(h5_data[f_name]['time'], 1000.0*h5_data[f_name]['Potential'],
line_width=1, color=colrs[f_i])
)
legend_items.append((leg_name, [my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
sp_fig.add_layout(my_legend, 'right')
return sp_fig
def plot_spike_raster(in_h5_file, exclude_list=[]):
rast_fig = bplt.figure(title='Spike Raster vs Time')
rast_fig.xaxis.axis_label = 'time (sec)'
print('Plotting Spike Raster values from {}'.format(in_h5_file))
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 1
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_times' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
y_vals = f_i*np.ones(h5_data[f_name].shape)
if leg_name not in exclude_list:
my_circles.append(rast_fig.circle(h5_data[f_name], y_vals,
line_width=3, color=colrs[f_i-1])
)
legend_items.append((leg_name, [my_circles[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
rast_fig.add_layout(my_legend, 'right')
return rast_fig
def plot_instantaneous_spike_rate(in_h5_file, exclude_list=[], t_start=0):
isr_fig = bplt.figure(title='Instantaneous Spike Rate vs Time')
isr_fig.xaxis.axis_label = 'time (sec)'
isr_fig.yaxis.axis_label = 'spike rate (Hz)'
print('Plotting instantaneous spike rate from {}'.format(in_h5_file))
my_lines = []
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
my_lines.append(isr_fig.line(h5_data[f_name]['time'], h5_data[f_name]['ISR'],
line_width=3, color=colrs[f_i])
)
my_circles.append(isr_fig.circle(h5_data[f_name]['time'], h5_data[f_name]['ISR'],
size=6, color=colrs[f_i])
)
legend_items.append((leg_name, [my_circles[-1], my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
isr_fig.add_layout(my_legend, 'right')
isr_fig.x_range.start = t_start
return isr_fig
def plot_spike_accel(in_h5_file, exclude_list=[], normalize=False, t_start=0):
if normalize:
acc_fig = bplt.figure(title='Normalized Spike Acceleration vs Time')
else:
acc_fig = bplt.figure(title='Spike Acceleration vs Time')
acc_fig.xaxis.axis_label = 'time (sec)'
acc_fig.yaxis.axis_label = 'spike acceleration (%)'
print('Plotting spike acceleration from {}'.format(in_h5_file))
my_lines = []
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
if normalize:
max_accel = np.max(h5_data[f_name]['Spike_Accel'])
my_lines.append(acc_fig.line(h5_data[f_name]['time'], h5_data[f_name]['Spike_Accel']/max_accel,
line_width=3, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(h5_data[f_name]['time'],
h5_data[f_name]['Spike_Accel']/max_accel,
size=6, color=colrs[f_i])
)
else:
my_lines.append(acc_fig.line(h5_data[f_name]['time'], h5_data[f_name]['Spike_Accel'],
line_width=3, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(h5_data[f_name]['time'], h5_data[f_name]['Spike_Accel'],
size=6, color=colrs[f_i])
)
legend_items.append((leg_name, [my_circles[-1], my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
acc_fig.add_layout(my_legend, 'right')
acc_fig.x_range.start = t_start
return acc_fig
def plot_spike_accel_aligned(in_h5_file, exclude_list=[], normalize=False):
if normalize:
acc_fig = bplt.figure(title='Normalized Spike Acceleration vs Time')
else:
acc_fig = bplt.figure(title='Spike Acceleration vs Time')
acc_fig.xaxis.axis_label = 'time (sec)'
acc_fig.yaxis.axis_label = 'spike acceleration (%)'
print('Plotting spike acceleration from {}'.format(in_h5_file))
my_lines = []
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
ach_time = h5_data[name + '/ach_times'][0] + 0.5
acc_spikes = h5_data[name + '/spike_times'].loc[h5_data[name+'/spike_times'] > ach_time].to_numpy()
acc_isr = 1.0 / np.diff(acc_spikes)
acc_t = acc_spikes[:-1]
sp0 = acc_spikes[0]
freq_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
freq_val = h5_data['frequency_table']['Frequency'][freq_i].values[0]
sp_accel = (acc_isr - freq_val)/freq_val*100
if normalize:
max_accel = np.max(sp_accel)
my_lines.append(
acc_fig.line(acc_t-sp0, sp_accel / max_accel,
line_width=2, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(acc_t-sp0, sp_accel / max_accel,
size=6, color=colrs[f_i])
)
else:
my_lines.append(acc_fig.line(acc_t-sp0, sp_accel,
line_width=3, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(acc_t-sp0, sp_accel,
size=6, color=colrs[f_i])
)
legend_items.append((leg_name, [my_circles[-1], my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
acc_fig.add_layout(my_legend, 'right')
return acc_fig
def plot_spike_cessation(in_h5_file, exclude_list=[], add_mean=True):
cess_names = []
cess_vals = []
with pd.HDFStore(in_h5_file) as h5_data:
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
cess_names.append(leg_name)
cess_vals.append(1.0/np.min(h5_data[f_name]['ISR']))
    if add_mean:
        mean_cess = np.mean(cess_vals)
        mean_name = 'Mean: {0:.2f} sec'.format(mean_cess)
        # build a new list so cess_names is not mutated through an alias
        all_names = cess_names + [mean_name]
    else:
        all_names = cess_names
cess_fig = bplt.figure(x_range=all_names, title='Duration of Spike Cessation after ACh')
cess_fig.yaxis.axis_label = 'duration (sec)'
cess_fig.vbar(x=cess_names, top=cess_vals, width=0.9, color=colrs[0])
if add_mean:
cess_fig.vbar(x=[mean_name], top=[mean_cess], width=0.9, color='red')
cess_fig.xaxis.major_label_orientation = np.pi / 2
cess_fig.y_range.start = 0.0
return cess_fig
def plot_average_ifr(in_h5_file, exclude_list=[]):
with pd.HDFStore(in_h5_file) as h5_data:
h5_df = pd.DataFrame(h5_data['frequency_table'])
h5_df = h5_df.sort_values(by=['Filename'])
sel_tab = h5_data['frequency_table'][~h5_data['frequency_table']['Legend'].isin(exclude_list)]
sel_tab.sort_values('Legend', inplace=True)
x_names = sel_tab['Legend'].tolist()
x_names.append('Average')
cess_fig = bplt.figure(x_range=x_names,
title='Average Pre-ACh Frequency and ISR')
cess_fig.vbar(x=sel_tab['Legend'],
top=sel_tab['Frequency'],
width=0.9, color='blue', alpha=0.6, legend='Frequency')
cess_fig.vbar(x=sel_tab['Legend'],
top=sel_tab['ISR_Mean'],
width=0.6, color='red', alpha=0.6, legend='ISR')
mean_isr = np.mean(sel_tab['ISR_Mean'])
mean_freq = np.mean(sel_tab['Frequency'])
cess_fig.vbar(x=['Average'], top=[mean_freq], width=0.9, color='navy', alpha=0.6)
cess_fig.vbar(x=['Average'], top=[mean_isr], width=0.6, color='maroon', alpha=0.6)
cess_fig.xaxis.major_label_orientation = np.pi / 2
cess_fig.yaxis.axis_label = 'frequency (Hz)'
cess_fig.y_range.start = 0.0
cess_fig.legend.location = 'top_right'
return cess_fig
def plot_average_curve(in_h5_file, time_start=8.5, time_bin_size=0.1, exclude_list=[], spike_acceleration=False,
return_curve=False):
long_time = 0
with pd.HDFStore(in_h5_file) as h5_data:
name_sort = list(h5_data.keys())
name_sort.sort()
# get longest recorded time
for f_name in name_sort:
if 'data' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
e_time = np.max(h5_data[f_name]['time'])
if e_time > long_time:
long_time = e_time
# make array of time bins
t_bins = np.arange(time_start, long_time+time_bin_size, time_bin_size)
isr_avg = np.zeros((t_bins.size - 1,))
acc_avg = np.zeros((t_bins.size - 1,))
c_count = np.zeros((t_bins.size - 1,))
for f_name in name_sort:
if 'spike_times' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
acc_spikes = h5_data[name + '/spike_times'].loc[h5_data[name + '/spike_times'] > time_start].to_numpy()
acc_isrs = 1.0 / np.diff(acc_spikes)
acc_t = acc_spikes[:-1]
freq_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
freq_val = h5_data['frequency_table']['Frequency'][freq_i].values[0]
sp_accels = (acc_isrs - freq_val) / freq_val * 100
                    sp_is = np.digitize(acc_t, t_bins)
                    for sp_i, sp_acc, sp_isr in zip(sp_is, sp_accels, acc_isrs):
                        # np.digitize returns 1-based bin indices; shift to 0-based and clip to the last bin
                        b_i = min(sp_i - 1, isr_avg.size - 1)
                        isr_avg[b_i] += sp_isr
                        acc_avg[b_i] += sp_acc
                        c_count[b_i] += 1
isr_avg = np.divide(isr_avg, c_count, where=np.greater(c_count, 0))
acc_avg = np.divide(acc_avg, c_count, where=np.greater(c_count, 0))
if spike_acceleration:
avg_fig = bplt.figure(title='Average Acceleration Versus Time')
avg_fig.yaxis.axis_label = 'spike acceleration (%)'
avg_fig.line(t_bins[:-1], acc_avg, line_width=3, color=colrs[0])
avg_fig.circle(t_bins[:-1], acc_avg, size=12, color=colrs[0])
else:
avg_fig = bplt.figure(title='Average Instantaneous Spike Rate Versus Time')
avg_fig.yaxis.axis_label = 'ISR (Hz)'
avg_fig.line(t_bins[:-1], isr_avg, line_width=3, color=colrs[0])
avg_fig.circle(t_bins[:-1], isr_avg, size=12, color=colrs[0])
avg_fig.xaxis.axis_label = 'time (sec)'
if return_curve:
if spike_acceleration:
return avg_fig, t_bins[:-1], acc_avg
else:
return avg_fig, t_bins[:-1], isr_avg
else:
return avg_fig
def plot_spike_cessation_vs_isr_variance(in_h5_file, exclude_list=[]):
cess_names = []
cess_vals = []
ifr_vars = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
cess_names.append(name)
cess_vals.append(1.0 / np.min(h5_data[f_name]['ISR']))
c_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
ifr_vars.append(h5_data['frequency_table']['ISR_Var'][c_i].values[0])
cess_fig = bplt.figure(title='Spike Cessation vs ISR Variance')
cess_fig.circle(cess_vals, ifr_vars, size=12, color=colrs[0])
cess_fig.xaxis.axis_label = 'duration of spike cessation (sec)'
cess_fig.yaxis.axis_label = 'variance of ISR (Hz)'
return cess_fig
def plot_peak_acceleration_vs_spike_cessation(in_h5_file, exclude_list=[]):
fail_acc = []
fail_cess = []
fail_names = []
succ_acc = []
succ_cess = []
succ_names = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
ach_name = name + '/ach_times'
ach_start = h5_data[ach_name][0]
cess_val = 1.0 / np.min(h5_data[f_name]['ISR'])
acc_i = np.where(h5_data[f_name]['time'] < ach_start)
max_acc_pre = np.max(h5_data[f_name].loc[h5_data[f_name]['time'] < ach_start, 'Spike_Accel'].tolist())
max_acc = np.max(h5_data[f_name]['Spike_Accel'])
if max_acc <= 1.1*max_acc_pre:
fail_acc.append(max_acc)
fail_cess.append(cess_val)
fail_names.append(leg_name)
else:
succ_acc.append(max_acc)
succ_cess.append(cess_val)
succ_names.append(leg_name)
acc_fig = bplt.figure(title='Peak Spike Acceleration vs Duration of Spike Cessation')
acc_fig.circle(fail_cess, fail_acc, size=12, color='red', legend='no acceleration')
acc_fig.circle(succ_cess, succ_acc, size=12, color='green', legend='acceleration')
acc_fig.xaxis.axis_label = 'duration of spike cessation (sec)'
acc_fig.yaxis.axis_label = 'peak acceleration (%)'
print('Failed to Demonstrate Spike Acceleration')
print(fail_names)
print('Demonstrated at least 10% increase in ISR')
print(succ_names)
return acc_fig
def plot_peak_acceleration_vs_isr_variance(in_h5_file, exclude_list=[]):
acc_vals = []
var_vals = []
names = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
ach_name = name + '/ach_times'
ach_start = h5_data[ach_name][0]
c_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
var_vals.append(h5_data['frequency_table']['ISR_Var'][c_i].values[0])
max_acc = np.max(h5_data[f_name]['Spike_Accel'])
acc_vals.append(max_acc)
names.append(leg_name)
acc_fig = bplt.figure(title='Peak Spike Acceleration vs ISR Variance')
acc_fig.circle(var_vals, acc_vals, size=12, color=colrs[0])
acc_fig.xaxis.axis_label = 'variance of ISR (Hz)'
acc_fig.yaxis.axis_label = 'peak acceleration (%)'
return acc_fig
def print_average_table(in_h5_file):
with pd.HDFStore(in_h5_file) as h5_data:
h5_df = pd.DataFrame(h5_data['frequency_table'])
print(h5_df)
def analyze_spike_data_from_hdf5(in_h5_file):
avg_freqs = []
avg_isrs = []
var_isrs = []
cell_names = []
legend_names = []
with | pd.HDFStore(in_h5_file) | pandas.HDFStore |
# Author : <EMAIL>
# Date : 2020-12-03
import logging
import numpy as np
import pandas as pd
import os, glob, time, datetime
import pickle
import gzip
import copy
import json
import cv2
import random
import torch
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
import torch.distributed as dist
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel
from multiprocessing import Pool
from torch.optim import lr_scheduler
import torch.optim as optim
import torch.nn as nn
import torchvision.transforms as A
from PIL import Image
from efficientnet_pytorch import EfficientNet
import matplotlib.pyplot as plt
import pydicom as dicom
from pydicom.pixel_data_handlers.numpy_handler import unpack_bits
from torch.utils.tensorboard import SummaryWriter
disease = ['Sinusitis','Oral_cancer'][0]
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
def get_logger(name, level=logging.DEBUG, resetlogfile=False, path='log'):
fname = os.path.join(path, name+'.log')
os.makedirs(path, exist_ok=True)
if resetlogfile :
if os.path.exists(fname):
os.remove(fname)
logger = logging.getLogger(name)
logger.handlers.clear()
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.FileHandler(fname)
fh.setLevel(level)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
today_datever = datetime.datetime.now().strftime("%y%m%d")
logger = get_logger(f'{disease}_EfficientNet_{today_datever}', resetlogfile=True)
logger.setLevel(logging.INFO)
def prepare_metatable(filenames):
cvdf = pd.DataFrame(filenames)
cvdf.columns = ['filename']
cvdf['patientID'] = cvdf['filename'].apply(lambda x : x.split('/')[-1].split('_')[0])
#cvdf['year'] = cvdf['filename'].apply(lambda x : x.split('/')[-2].split('_')[1])
cvdf['left_label_org'] = cvdf['filename'].apply(lambda x : x.replace('.dcm', '').replace('.jpg', '').split('_')[-2])
cvdf['right_label_org'] = cvdf['filename'].apply(lambda x : x.replace('.dcm', '').replace('.jpg', '').split('_')[-1])
#print(pd.crosstab(cvdf['left_label_org'], cvdf['right_label_org'], margins=True))
cvdf['left_label'] = '1'
cvdf.at[cvdf['left_label_org']=='0','left_label'] = '0'
cvdf.at[cvdf['left_label_org']=='x','left_label'] = 'x'
cvdf['right_label'] = '1'
cvdf.at[cvdf['right_label_org']=='0','right_label'] = '0'
cvdf.at[cvdf['right_label_org']=='x','right_label'] = 'x'
#print(pd.crosstab(cvdf['left_label'], cvdf['right_label']))
cvdf['FOLD'] = np.nan
oldcolumns = cvdf.columns.tolist()
cvdf['index'] = cvdf.index
cvdf = cvdf[['index']+oldcolumns]
return cvdf
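# Hedged usage sketch (not part of the original module): the two file names are made up
# but follow the <patientID>_..._<left>_<right>.dcm pattern the parser expects; it shows
# the label columns prepare_metatable derives before any folds are assigned.
def example_metatable():
    example_files = [
        'final_dcm/00001234_example_0_1.dcm',
        'final_dcm/00005678_example_x_2.dcm',
    ]
    meta = prepare_metatable(example_files)
    return meta[['patientID', 'left_label', 'right_label']]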
def save_validationlist(root='.'):
# list up filenames of valid data
# totalfiles = glob.glob(os.path.join(root,"test_20??_withUPID","*.dcm"))
# filenames = glob.glob(os.path.join(root,"test_20??_withUPID","*_[0-3]_[0-3].dcm"))
data_dir = ["final_dcm","final_crop"][0]
logger.info('['*10+' '*20 +'START ANALYSIS'+' '*20+ ']'*10)
filenames = glob.glob(os.path.join(root,data_dir,"*" + (".dcm" if data_dir=='final_dcm' else '.jpg')))
logger.info(f'No. of total datasets : {len(filenames)} patients') # 6516
rmfn = glob.glob(os.path.join(root,data_dir,"*_x_x"+(".dcm" if data_dir=='final_dcm' else '.jpg')))
    if len(rmfn) >= 1:
        logger.info(' x_x.dcm :')
        logger.info(rmfn)
        for fn in rmfn:
            filenames.remove(fn)
logger.info(f'No. of valid datasets : {len(filenames)} patients (excluded x_x.dcm )') #2980 (20.10.7 ver)
cvdf = prepare_metatable(filenames)
n_folds = 10
plen = len(filenames)
logger.info(f'----- Split patients for {n_folds} Cross-validation')
skf = StratifiedKFold(n_splits=n_folds, random_state=42, shuffle=True)
for ii, (train_pindex, test_pindex) in enumerate(skf.split(range(plen),cvdf['left_label'])):
# record fold index
cvdf.at[test_pindex,'FOLD']= ii
cvdf[f'FOLD{ii}_testset'] = 0
cvdf.at[test_pindex,f'FOLD{ii}_testset'] = 1
# save metadata
filelist_dir = os.path.join(root,'inputlist')
os.makedirs(filelist_dir, exist_ok=True)
cvdf.to_csv(os.path.join(filelist_dir,"input_metadata_table.csv"),index=False)
cvdf[['index','filename']].to_csv(os.path.join(filelist_dir,"input_filenames_total.csv"),index=False)
for i in range(n_folds):
cvdf.loc[cvdf[f'FOLD{i}_testset']==1,'filename'].to_csv(os.path.join(filelist_dir,f"input_filenames_fold{i}.csv"),index=False)
# statistics
    logger.info('----- Data statistics by fold')
logger.info(cvdf['FOLD'].value_counts())
labelfreq_left = pd.crosstab(cvdf['FOLD'], cvdf['left_label'], margins=True)
labelfreq_left_ratio = pd.crosstab(cvdf['FOLD'], cvdf['left_label'], margins=True, normalize='index')
labelfreq_right = pd.crosstab(cvdf['FOLD'], cvdf['right_label'], margins=True)
labelfreq_right_ratio = pd.crosstab(cvdf['FOLD'], cvdf['right_label'], margins=True, normalize='index')
labelfreq = pd.concat([labelfreq_left, labelfreq_right], axis=1, keys=['left_sinus', 'right_sinus'], names=[' ','label'])
labelfreq_ratio = pd.concat([labelfreq_left_ratio, labelfreq_right_ratio], axis=1, keys=['left_sinus', 'right_sinus'], names=[' ','label (ratio)'])
labelfreq.to_csv(os.path.join(filelist_dir,f"label_freq_byfold.csv"))
labelfreq_ratio.to_csv(os.path.join(filelist_dir,f"label_freq_ratio_byfold.csv"),float_format = '%.2f')
logger.info(f'----- Label frequency by fold')
logger.info(labelfreq)
logger.info(f'----- Label frequency (ratio) by fold')
logger.info(labelfreq_ratio)
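# Hedged usage sketch (not part of the original module): assumes a final_dcm folder with
# the expected *_<left>_<right>.dcm files under `root`; after the split, the metadata
# table and the per-fold file lists written by save_validationlist can be read straight back.
def example_build_folds(root='.'):
    save_validationlist(root=root)
    meta = pd.read_csv(os.path.join(root, 'inputlist', 'input_metadata_table.csv'))
    fold0_files = pd.read_csv(os.path.join(root, 'inputlist', 'input_filenames_fold0.csv'))
    return meta, fold0_files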
import multiprocessing
class ImageDataset(Dataset):
def __init__(self, root='.', input_csv='inputlist/input_filenames_fold', annotation_path=None,
fold_num=0, data_type='train', carrydata=True, transform=None, savejpg=True):
super(ImageDataset, self).__init__()
self.root = root
self.input_csv = input_csv
self.annotation_path = annotation_path
self.fold_num = fold_num
self.data_type = data_type # train, val, test
self.carrydata = carrydata
self.transform = transform
self.savejpg = savejpg
if self.annotation_path is not None:
json_file = open(self.annotation_path)
roi_annotation = json.load(json_file) #coco
json_file.close()
self.roi_dict = dict()
for segmentation in roi_annotation:
image_name = list(segmentation.keys())[0].replace('.dcm','')
bbox_dict = list(segmentation.values())[0][-1]
assert bbox_dict['name']=='bounding box'
self.roi_dict[image_name] = bbox_dict['points'] # {'00001334_0_0' : [128.91, 230, 920.48, 786.83]}
logger.info('--'*20)
logger.info(f"- Build {self.data_type} dataset")
logger.info('-- Transform')
logger.info(self.transform)
logger.info('-- ')
n_folds = 10
train_fold = list(range(n_folds))
val_fold = train_fold[fold_num-1]
train_fold.remove(fold_num) # test set
train_fold.remove(val_fold) # validation set
if data_type=="train":
self.filenames = []
for i in train_fold:
fl_i = pd.read_csv(f'{input_csv}{i}.csv')['filename'].tolist()
self.filenames.extend(fl_i)
elif data_type=="val":
self.filenames = | pd.read_csv(f'{input_csv}{val_fold}.csv') | pandas.read_csv |
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
from __future__ import division
import operator
import random
from itertools import product
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core import Series
from cudf.core.index import as_index
from cudf.tests import utils
from cudf.utils.dtypes import (
BOOL_TYPES,
DATETIME_TYPES,
FLOAT_TYPES,
INTEGER_TYPES,
TIMEDELTA_TYPES,
)
STRING_TYPES = {"str"}
_binops = [
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("binop", _binops)
def test_series_binop(binop, obj_class):
nelem = 1000
arr1 = utils.gen_rand("float64", nelem) * 10000
# Keeping a low value because CUDA 'pow' has 2 full range error
arr2 = utils.gen_rand("float64", nelem) * 10
sr1 = Series(arr1)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(sr1, sr2)
expect = binop(pd.Series(arr1), pd.Series(arr2))
if obj_class == "Index":
result = Series(result)
utils.assert_eq(result, expect)
@pytest.mark.parametrize("binop", _binops)
def test_series_binop_concurrent(binop):
def func(index):
arr = np.random.random(100) * 10
sr = Series(arr)
result = binop(sr.astype("int32"), sr)
expect = binop(arr.astype("int32"), arr)
np.testing.assert_almost_equal(result.to_array(), expect, decimal=5)
from concurrent.futures import ThreadPoolExecutor
indices = range(10)
    with ThreadPoolExecutor(4) as e:  # four worker threads
list(e.map(func, indices))
@pytest.mark.parametrize("use_cudf_scalar", [False, True])
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("nelem,binop", list(product([1, 2, 100], _binops)))
def test_series_binop_scalar(nelem, binop, obj_class, use_cudf_scalar):
arr = np.random.random(nelem)
rhs = random.choice(arr).item()
sr = Series(arr)
if obj_class == "Index":
sr = as_index(sr)
if use_cudf_scalar:
result = binop(sr, rhs)
else:
result = binop(sr, cudf.Scalar(rhs))
if obj_class == "Index":
result = Series(result)
np.testing.assert_almost_equal(result.to_array(), binop(arr, rhs))
_bitwise_binops = [operator.and_, operator.or_, operator.xor]
_int_types = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("binop", _bitwise_binops)
@pytest.mark.parametrize(
"lhs_dtype,rhs_dtype", list(product(_int_types, _int_types))
)
def test_series_bitwise_binop(binop, obj_class, lhs_dtype, rhs_dtype):
arr1 = (np.random.random(100) * 100).astype(lhs_dtype)
sr1 = Series(arr1)
arr2 = (np.random.random(100) * 100).astype(rhs_dtype)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(sr1, sr2)
if obj_class == "Index":
result = Series(result)
np.testing.assert_almost_equal(result.to_array(), binop(arr1, arr2))
_logical_binops = [
(operator.and_, operator.and_),
(operator.or_, operator.or_),
(np.logical_and, cudf.logical_and),
(np.logical_or, cudf.logical_or),
]
@pytest.mark.parametrize("lhstype", _int_types + [np.bool_])
@pytest.mark.parametrize("rhstype", _int_types + [np.bool_])
@pytest.mark.parametrize("binop,cubinop", _logical_binops)
def test_series_logical_binop(lhstype, rhstype, binop, cubinop):
arr1 = pd.Series(np.random.choice([True, False], 10))
if lhstype is not np.bool_:
arr1 = arr1 * (np.random.random(10) * 100).astype(lhstype)
sr1 = Series(arr1)
arr2 = pd.Series(np.random.choice([True, False], 10))
if rhstype is not np.bool_:
arr2 = arr2 * (np.random.random(10) * 100).astype(rhstype)
sr2 = Series(arr2)
result = cubinop(sr1, sr2)
expect = binop(arr1, arr2)
utils.assert_eq(result, expect)
_cmpops = [
operator.lt,
operator.gt,
operator.le,
operator.ge,
operator.eq,
operator.ne,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize(
"dtype", ["int8", "int32", "int64", "float32", "float64", "datetime64[ms]"]
)
def test_series_compare(cmpop, obj_class, dtype):
arr1 = np.random.randint(0, 100, 100).astype(dtype)
arr2 = np.random.randint(0, 100, 100).astype(dtype)
sr1 = Series(arr1)
sr2 = Series(arr2)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result1 = cmpop(sr1, sr1)
result2 = cmpop(sr2, sr2)
result3 = cmpop(sr1, sr2)
if obj_class == "Index":
result1 = Series(result1)
result2 = Series(result2)
result3 = Series(result3)
np.testing.assert_equal(result1.to_array(), cmpop(arr1, arr1))
np.testing.assert_equal(result2.to_array(), cmpop(arr2, arr2))
np.testing.assert_equal(result3.to_array(), cmpop(arr1, arr2))
@pytest.mark.parametrize(
"obj", [pd.Series(["a", "b", None, "d", "e", None]), "a"]
)
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize(
"cmp_obj", [pd.Series(["b", "a", None, "d", "f", None]), "a"]
)
def test_string_series_compare(obj, cmpop, cmp_obj):
g_obj = obj
if isinstance(g_obj, pd.Series):
g_obj = Series.from_pandas(g_obj)
g_cmp_obj = cmp_obj
if isinstance(g_cmp_obj, pd.Series):
g_cmp_obj = Series.from_pandas(g_cmp_obj)
got = cmpop(g_obj, g_cmp_obj)
expected = cmpop(obj, cmp_obj)
utils.assert_eq(expected, got)
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("nelem", [1, 2, 100])
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize("dtype", utils.NUMERIC_TYPES + ["datetime64[ms]"])
@pytest.mark.parametrize("use_cudf_scalar", [True, False])
def test_series_compare_scalar(
nelem, cmpop, obj_class, dtype, use_cudf_scalar
):
arr1 = np.random.randint(0, 100, 100).astype(dtype)
sr1 = Series(arr1)
rhs = random.choice(arr1).item()
if use_cudf_scalar:
rhs = cudf.Scalar(rhs)
if obj_class == "Index":
sr1 = as_index(sr1)
result1 = cmpop(sr1, rhs)
result2 = cmpop(rhs, sr1)
if obj_class == "Index":
result1 = Series(result1)
result2 = Series(result2)
np.testing.assert_equal(result1.to_array(), cmpop(arr1, rhs))
np.testing.assert_equal(result2.to_array(), cmpop(rhs, arr1))
_nulls = ["none", "some"]
@pytest.mark.parametrize("nelem", [1, 7, 8, 9, 32, 64, 128])
@pytest.mark.parametrize("lhs_nulls,rhs_nulls", list(product(_nulls, _nulls)))
def test_validity_add(nelem, lhs_nulls, rhs_nulls):
np.random.seed(0)
# LHS
lhs_data = np.random.random(nelem)
if lhs_nulls == "some":
lhs_mask = utils.random_bitmask(nelem)
lhs_bitmask = utils.expand_bits_to_bytes(lhs_mask)[:nelem]
lhs_null_count = utils.count_zero(lhs_bitmask)
assert lhs_null_count >= 0
lhs = Series.from_masked_array(lhs_data, lhs_mask)
assert lhs.null_count == lhs_null_count
else:
lhs = Series(lhs_data)
# RHS
rhs_data = np.random.random(nelem)
if rhs_nulls == "some":
rhs_mask = utils.random_bitmask(nelem)
rhs_bitmask = utils.expand_bits_to_bytes(rhs_mask)[:nelem]
rhs_null_count = utils.count_zero(rhs_bitmask)
assert rhs_null_count >= 0
rhs = Series.from_masked_array(rhs_data, rhs_mask)
assert rhs.null_count == rhs_null_count
else:
rhs = Series(rhs_data)
# Result
res = lhs + rhs
if lhs_nulls == "some" and rhs_nulls == "some":
res_mask = np.asarray(
utils.expand_bits_to_bytes(lhs_mask & rhs_mask), dtype=np.bool
)[:nelem]
if lhs_nulls == "some" and rhs_nulls == "none":
res_mask = np.asarray(
utils.expand_bits_to_bytes(lhs_mask), dtype=np.bool
)[:nelem]
if lhs_nulls == "none" and rhs_nulls == "some":
res_mask = np.asarray(
utils.expand_bits_to_bytes(rhs_mask), dtype=np.bool
)[:nelem]
# Fill NA values
na_value = -10000
got = res.fillna(na_value).to_array()
expect = lhs_data + rhs_data
if lhs_nulls == "some" or rhs_nulls == "some":
expect[~res_mask] = na_value
np.testing.assert_array_equal(expect, got)
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"binop,lhs_dtype,rhs_dtype",
list(
product(
[operator.add, operator.mul],
utils.NUMERIC_TYPES,
utils.NUMERIC_TYPES,
)
),
)
def test_series_binop_mixed_dtype(binop, lhs_dtype, rhs_dtype, obj_class):
nelem = 10
lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)
rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)
sr1 = Series(lhs)
sr2 = Series(rhs)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = binop(Series(sr1), Series(sr2))
if obj_class == "Index":
result = Series(result)
np.testing.assert_almost_equal(result.to_array(), binop(lhs, rhs))
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"cmpop,lhs_dtype,rhs_dtype",
list(product(_cmpops, utils.NUMERIC_TYPES, utils.NUMERIC_TYPES)),
)
def test_series_cmpop_mixed_dtype(cmpop, lhs_dtype, rhs_dtype, obj_class):
nelem = 5
lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)
rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)
sr1 = Series(lhs)
sr2 = Series(rhs)
if obj_class == "Index":
sr1 = as_index(sr1)
sr2 = as_index(sr2)
result = cmpop(Series(sr1), Series(sr2))
if obj_class == "Index":
result = Series(result)
np.testing.assert_array_equal(result.to_array(), cmpop(lhs, rhs))
_reflected_ops = [
lambda x: 1 + x,
lambda x: 2 * x,
lambda x: 2 - x,
lambda x: 2 // x,
lambda x: 2 / x,
lambda x: 3 + x,
lambda x: 3 * x,
lambda x: 3 - x,
lambda x: 3 // x,
lambda x: 3 / x,
lambda x: 3 % x,
lambda x: -1 + x,
lambda x: -2 * x,
lambda x: -2 - x,
lambda x: -2 // x,
lambda x: -2 / x,
lambda x: -3 + x,
lambda x: -3 * x,
lambda x: -3 - x,
lambda x: -3 // x,
lambda x: -3 / x,
lambda x: -3 % x,
lambda x: 0 + x,
lambda x: 0 * x,
lambda x: 0 - x,
lambda x: 0 // x,
lambda x: 0 / x,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"func, dtype", list(product(_reflected_ops, utils.NUMERIC_TYPES))
)
def test_reflected_ops_scalar(func, dtype, obj_class):
# create random series
np.random.seed(12)
random_series = utils.gen_rand(dtype, 100, low=10)
# gpu series
gs = Series(random_series)
# class typing
if obj_class == "Index":
gs = as_index(gs)
gs_result = func(gs)
# class typing
if obj_class == "Index":
gs = Series(gs)
# pandas
ps_result = func(random_series)
# verify
np.testing.assert_allclose(ps_result, gs_result.to_array())
_cudf_scalar_reflected_ops = [
lambda x: cudf.Scalar(1) + x,
lambda x: cudf.Scalar(2) * x,
lambda x: cudf.Scalar(2) - x,
lambda x: cudf.Scalar(2) // x,
lambda x: cudf.Scalar(2) / x,
lambda x: cudf.Scalar(3) + x,
lambda x: cudf.Scalar(3) * x,
lambda x: cudf.Scalar(3) - x,
lambda x: cudf.Scalar(3) // x,
lambda x: cudf.Scalar(3) / x,
lambda x: cudf.Scalar(3) % x,
lambda x: cudf.Scalar(-1) + x,
lambda x: cudf.Scalar(-2) * x,
lambda x: cudf.Scalar(-2) - x,
lambda x: cudf.Scalar(-2) // x,
lambda x: cudf.Scalar(-2) / x,
lambda x: cudf.Scalar(-3) + x,
lambda x: cudf.Scalar(-3) * x,
lambda x: cudf.Scalar(-3) - x,
lambda x: cudf.Scalar(-3) // x,
lambda x: cudf.Scalar(-3) / x,
lambda x: cudf.Scalar(-3) % x,
lambda x: cudf.Scalar(0) + x,
lambda x: cudf.Scalar(0) * x,
lambda x: cudf.Scalar(0) - x,
lambda x: cudf.Scalar(0) // x,
lambda x: cudf.Scalar(0) / x,
]
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize(
"funcs, dtype",
list(
product(
list(zip(_reflected_ops, _cudf_scalar_reflected_ops)),
utils.NUMERIC_TYPES,
)
),
)
def test_reflected_ops_cudf_scalar(funcs, dtype, obj_class):
cpu_func, gpu_func = funcs
# create random series
np.random.seed(12)
random_series = utils.gen_rand(dtype, 100, low=10)
# gpu series
gs = Series(random_series)
# class typing
if obj_class == "Index":
gs = as_index(gs)
gs_result = gpu_func(gs)
# class typing
if obj_class == "Index":
gs = Series(gs)
# pandas
ps_result = cpu_func(random_series)
# verify
np.testing.assert_allclose(ps_result, gs_result.to_array())
@pytest.mark.parametrize("binop", _binops)
def test_different_shapes_and_columns(binop):
# TODO: support `pow()` on NaN values. Particularly, the cases:
# `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`
if binop is operator.pow:
return
# Empty frame on the right side
pd_frame = binop(pd.DataFrame({"x": [1, 2]}), | pd.DataFrame({}) | pandas.DataFrame |
import os
import pandas_datareader
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow import keras
import pandas
import pandas as pd
import plotly.express as px
import pandas_datareader.data as web
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import numpy as np
def cumulativeDifference(stock):
"""Method: Gets the cumulative return based on the stock prices"""
for index in stock.columns[1:]:
stock[index] = stock[index] / stock[index][0]
return stock
def plotlyPlot(title, stock):
"""Method: Displays an interactive representation of given stock data in a
line graph on your browser"""
fig = px.line(title=title)
for index in stock.columns[1:]:
fig.add_scatter(x=stock['Date'], y=stock[index], name=index)
fig.show()
def individualStock(priceDataFrame , volumeDataFrame, name):
return pd.DataFrame({'Date':priceDataFrame['Date'], 'Close':priceDataFrame[name], 'Volume':volumeDataFrame[name]})
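# Hedged usage sketch (not part of the original script): stockPrice and stockVolume are
# assumed to be DataFrames with a 'Date' column plus one column per ticker, and 'AAPL'
# is a placeholder ticker; cumulativeDifference mutates its input, hence the copy().
def exampleSingleStockPlot(stockPrice, stockVolume):
    apple = individualStock(stockPrice, stockVolume, 'AAPL')
    normalised = cumulativeDifference(stockPrice.copy())
    plotlyPlot('Cumulative return (normalised to first day)', normalised)
    return apple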
def tradingWindow(data, n):
"""Method: Creates a column that would form the price target prediction for a stock
by getting the price for n days after each price"""
dayShift = n
data['target'] = data[['Adj Close']].shift(-dayShift)
# Removes the last n rows to prevent errors
data = data[:-n]
return data
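# Hedged usage sketch (not part of the original script): a tiny frame with an 'Adj Close'
# column shows how tradingWindow shifts prices n days ahead into 'target' and drops the
# last n rows that have no future price.
def exampleTradingWindow():
    sample = pd.DataFrame({'Adj Close': [10.0, 11.0, 12.0, 13.0, 14.0]})
    windowed = tradingWindow(sample, 1)
    # windowed now has 4 rows and windowed['target'] equals [11.0, 12.0, 13.0, 14.0]
    return windowed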
def LSTM(X_Train, Y_Train, X_Test):
# Reshape the 1D to 3D arrays to feed in the mode, reshaping the training data.
xTrain = np.reshape(X_Train, (X_Train.shape[0], X_Train.shape[1] , 1))
xTest = np.reshape(X_Test, (X_Test.shape[0], X_Test.shape[1] , 1))
# Building the LSTM deep neural network model
inputLayer = keras.layers.Input(shape = (xTrain.shape[1] , xTrain.shape[2]))
    # return_sequences=True makes each LSTM layer output the full sequence so the next LSTM layer can consume it
hidden = keras.layers.LSTM(150, return_sequences=True) (inputLayer)
hidden = keras.layers.LSTM(150, return_sequences=True)(hidden)
hidden = keras.layers.LSTM(150, return_sequences=True)(hidden)
# The output layer
outputLayer = keras.layers.Dense(1 , activation='linear')(hidden)
# Creating the model itself
brainModel = keras.Model(inputs = inputLayer, outputs = outputLayer)
brainModel.compile(optimizer = 'adam', loss = 'mse')
brainModel.summary()
# validation split would perform cross validation..
brainModel.fit(X_Train , Y_Train, epochs = 20, batch_size = 32, validation_split = 0.2)
return brainModel
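# Hedged usage sketch (not part of the original script): featureColumns/targetColumn are
# assumed to be 2-D numpy arrays (e.g. close price and volume against the shifted
# 'target' column); the scaling and chronological split here are placeholders, not the
# original pipeline.
def exampleTrainLSTM(featureColumns, targetColumn):
    featureScaler = MinMaxScaler()
    targetScaler = MinMaxScaler()
    X = featureScaler.fit_transform(featureColumns)
    y = targetScaler.fit_transform(targetColumn)
    X_Train, X_Test, Y_Train, Y_Test = train_test_split(X, y, test_size=0.2, shuffle=False)
    model = LSTM(X_Train, Y_Train, X_Test)
    # with return_sequences=True the network emits one value per input timestep
    predictions = model.predict(np.reshape(X_Test, (X_Test.shape[0], X_Test.shape[1], 1)))
    return predictions, Y_Test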
def retrieveData(Start, End, Ticker):
modifiedStart = pd.to_datetime(Start)
modifiedEnd = | pd.to_datetime(End) | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from math import sqrt
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
import statsmodels.api as sm
#Importing data
# dataframe
df = pd.read_csv("<NAME>.txt", sep='\t')
print(df.axes)
#homework
def replacezerowith(array, average):
array[array == 0] = average
dfcopy = df.copy()
dfasarray = np.asarray(dfcopy.VALUE)
replacezerowith(dfasarray, np.nan)
dfcopy['VALUE'] = dfasarray
dfcopy['AVERAGE'] = dfcopy.VALUE.interpolate()
print(dfcopy)
# when we look at the new RMSEs, the interpolated series gives smaller errors
size = len(dfcopy)
train = dfcopy[0:size-200]
test = dfcopy[size-200:]
df.DATE = pd.to_datetime(df.DATE,format="%Y-%m-%d")
df.index = df.DATE
train.DATE = pd.to_datetime(train.DATE,format="%Y-%m-%d")
train.index = train.DATE
test.DATE = | pd.to_datetime(train.DATE,format="%Y-%m-%d") | pandas.to_datetime |
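# Hedged sketch (not part of the original script): fits simple exponential smoothing and
# Holt's linear trend on the interpolated AVERAGE series built above and compares
# hold-out RMSEs, which is the comparison the "smaller errors" comment alludes to;
# it assumes the interpolated column has no remaining NaNs.
def example_rmse_comparison(train_df, test_df):
    ses_fit = SimpleExpSmoothing(np.asarray(train_df['AVERAGE'])).fit()
    holt_fit = Holt(np.asarray(train_df['AVERAGE'])).fit()
    ses_pred = ses_fit.forecast(len(test_df))
    holt_pred = holt_fit.forecast(len(test_df))
    rmse_ses = sqrt(mean_squared_error(test_df['AVERAGE'], ses_pred))
    rmse_holt = sqrt(mean_squared_error(test_df['AVERAGE'], holt_pred))
    return rmse_ses, rmse_holt
# e.g. example_rmse_comparison(train, test)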
from collections.abc import Sequence
from functools import partial
from math import isnan, nan
import pytest
from hypothesis import given
import hypothesis.strategies as st
from hypothesis.extra.pandas import indexes, columns, data_frames
import pandas as pd
import tahini.core.base
import tahini.testing
names_index_container_data_indexed = 'index'
name_index_internal = 'index_internal'
names_index_container_data_indexed_multi = ('index_0', 'index_1')
def get_data_frame(*args, name_index=names_index_container_data_indexed, **kwargs) -> pd.DataFrame:
return pd.DataFrame(*args, **kwargs).rename_axis(index=name_index)
def get_data_frame_internal(
*args,
index_internal=None,
name_index=names_index_container_data_indexed,
**kwargs,
) -> pd.DataFrame:
df = pd.DataFrame(*args, **kwargs).rename_axis(index=name_index).reset_index()
if index_internal is None:
index_internal = df[name_index]
df.index = pd.Index(index_internal, name=name_index_internal)
return df
def get_data_frame_index_multi(
*args,
names_index=names_index_container_data_indexed_multi,
index=None,
**kwargs,
) -> pd.DataFrame:
if index is None:
index = pd.MultiIndex(levels=[[]] * len(names_index), codes=[[]] * len(names_index), names=names_index)
else:
index = pd.MultiIndex.from_tuples(index, names=names_index)
return pd.DataFrame(*args, index=index, **kwargs)
def get_data_frame_internal_index_multi(
*args,
index_internal=None,
mapper=None,
**kwargs,
) -> pd.DataFrame:
df = get_data_frame_index_multi(*args, **kwargs)
if mapper is None:
def identity(x): return x
mapper = identity
if index_internal is None:
index_internal = df.index.to_flat_index().map(mapper)
df = df.reset_index()
df.index = pd.Index(index_internal, name=name_index_internal)
return df
def get_data_frame_internal_simple_index_multi(*arg, **kwargs):
df = (
get_data_frame_internal_index_multi(*arg, **kwargs)
.drop(columns=list(names_index_container_data_indexed_multi))
)
return df
get_data_frame_internal_index_multi_sets = partial(get_data_frame_internal_index_multi, mapper=frozenset)
get_data_frame_internal_simple_index_multi_sets = partial(
get_data_frame_internal_simple_index_multi,
mapper=frozenset,
)
assert_frame_equal = partial(
pd.testing.assert_frame_equal,
check_dtype=False,
check_column_type=False,
check_index_type=False,
)
assert_index_equal = partial(pd.testing.assert_index_equal, exact=False)
def check_nan(x):
try:
tf = isnan(x)
except TypeError:
tf = False
return tf
@pytest.mark.parametrize('klass', [
tahini.core.base.ContainerDataIndexed,
tahini.core.base.ContainerDataIndexedMulti,
tahini.core.base.ContainerDataIndexedMultiSets,
])
def test_container_data_indexed__names_index(klass):
assert isinstance(klass._names_index, Sequence)
@pytest.mark.parametrize('klass', [
tahini.core.base.ContainerDataIndexed,
tahini.core.base.ContainerDataIndexedMulti,
tahini.core.base.ContainerDataIndexedMultiSets,
])
def test_container_data_indexed__name_index_internal(klass):
assert isinstance(klass._name_index_internal, str)
@pytest.mark.parametrize('args, kwargs, expected', [
# empty index
([], dict(index=pd.Index([])), pd.Index([])),
# non empty index
([], dict(index=pd.Index([0])), pd.Index([0])),
# empty multi index
([], dict(index=pd.MultiIndex.from_arrays([[]])), pd.MultiIndex.from_arrays([[]])),
])
def test_container_data_indexed__create_index_internal(args, kwargs, expected):
index = tahini.core.base.ContainerDataIndexed._create_index_internal(*args, **kwargs)
assert_index_equal(index, expected)
@pytest.mark.parametrize('args, kwargs, type_error, message_error', [
# non unique index
([], dict(index=pd.Index([0, 0])), ValueError, "Index needs to be unique for 'ContainerDataIndexed'"),
])
def test_container_data_indexed__validate_index_error(args, kwargs, type_error, message_error):
with pytest.raises(type_error) as e:
tahini.core.base.ContainerDataIndexed._validate_index(*args, **kwargs)
assert e.value.args[0] == message_error
@pytest.mark.parametrize('args, kwargs, expected', [
# empty
([], dict(), get_data_frame_internal()),
# non empty index
([], dict(index=[0]), get_data_frame_internal(index=[0])),
# empty index
([], dict(index=[]), get_data_frame_internal()),
# empty container idempotent
([], dict(index=tahini.core.base.ContainerDataIndexed()), get_data_frame_internal()),
# empty data dict
([], dict(data=dict()), get_data_frame_internal()),
# empty data records
([], dict(data=[]), get_data_frame_internal()),
# empty data frame
([], dict(data=pd.DataFrame()), get_data_frame_internal()),
# data dict
([], dict(data=dict(a=[1])), get_data_frame_internal(data=dict(a=[1]))),
# dict and index
([], dict(data=dict(a=[1]), index=['z']), get_data_frame_internal(data=dict(a=[1]), index=['z'])),
# data frame
([], dict(data=pd.DataFrame(data=dict(a=[1]))), get_data_frame_internal(data=dict(a=[1]))),
# data frame with index
(
[],
dict(data=pd.DataFrame(data=dict(a=[1]), index=['z'])),
get_data_frame_internal(data=dict(a=[1]), index=['z']),
),
# data frame and index
(
[],
dict(data=pd.DataFrame(data=dict(a=[1])), index=['z']),
get_data_frame_internal(data=dict(a=[1]), index=['z']),
),
# data records
([], dict(data=[[1]]), get_data_frame_internal(data=[[1]])),
([], dict(data=['a', 'b']), get_data_frame_internal({0: ['a', 'b']})),
([], dict(data=[['a'], ['b']]), get_data_frame_internal({0: ['a', 'b']})),
([], dict(data=[['a', 'b']]), get_data_frame_internal({0: ['a'], 1: ['b']})),
# container idempotent
(
[],
dict(index=tahini.core.base.ContainerDataIndexed(data=pd.DataFrame(data=dict(a=[1]), index=['z']))),
get_data_frame_internal(data=dict(a=[1]), index=['z']),
),
# index as column
([], dict(data=dict(index=[0, 1])), get_data_frame_internal(index=[0, 1])),
])
def test_container_data_indexed_init(args, kwargs, expected):
container = tahini.core.base.ContainerDataIndexed(*args, **kwargs)
assert_frame_equal(container.data_internal, expected)
@pytest.mark.parametrize('args, kwargs, expected', [
# empty
([], dict(index=pd.Index([])), pd.Index([], name=names_index_container_data_indexed)),
# non empty
([], dict(index=pd.Index([0])), pd.Index([0], name=names_index_container_data_indexed)),
])
def test_container_data_indexed__validate_index(args, kwargs, expected):
index = tahini.core.base.ContainerDataIndexed._validate_index(*args, **kwargs)
assert_index_equal(index, expected)
@pytest.mark.parametrize('args, kwargs, expected', [
# empty
([], dict(data=pd.DataFrame()), get_data_frame()),
# non empty index
([], dict(data=pd.DataFrame(index=['a', 'b'])), get_data_frame(index=['a', 'b'])),
# non empty index with name
(
[],
dict(data=pd.DataFrame(index=pd.Index(['a', 'b'], name=f'not_{names_index_container_data_indexed}'))),
get_data_frame(index=['a', 'b']),
),
# non empty data
([], dict(data=pd.DataFrame(data=dict(a=[0, 1], b=[0, 1]))), get_data_frame(data=dict(a=[0, 1], b=[0, 1]))),
])
def test_container_data_indexed__validate_data(args, kwargs, expected):
df = tahini.core.base.ContainerDataIndexed._validate_data(*args, **kwargs)
assert_frame_equal(df, expected)
@pytest.mark.parametrize('args, kwargs, type_error, message_error', [
# non unique index
([], dict(index=[0, 0]), ValueError, "Index needs to be unique for 'ContainerDataIndexed'"),
# non matching length between index and data
(
[],
dict(data=pd.DataFrame(data=dict(a=[1])), index=[0, 1]),
ValueError,
"Length mismatch: Expected axis has 1 elements, new values have 2 elements",
),
# non matching length between index and data
(
[],
dict(data=pd.DataFrame(data=dict(a=[1, 2])), index=[0]),
ValueError,
"Length mismatch: Expected axis has 2 elements, new values have 1 elements",
),
])
def test_container_data_indexed_init_error(args, kwargs, type_error, message_error):
with pytest.raises(type_error) as e:
tahini.core.base.ContainerDataIndexed(*args, **kwargs)
assert e.value.args[0] == message_error
types_index = (
st.iterables,
indexes,
)
elements_non_specific = (
st.binary,
st.booleans,
st.characters,
st.complex_numbers,
st.dates,
st.datetimes,
st.fractions,
st.integers,
st.none,
st.randoms,
st.text,
st.times,
st.uuids,
)
elements_specific = (
# pandas.Timedeltas max and min do not match python standard library datetime.timedelta max and min
(
st.timedeltas,
dict(min_value=pd.Timedelta.min.to_pytimedelta(), max_value=pd.Timedelta.max.to_pytimedelta()),
lambda x: True,
lambda x: True,
),
# error with decimals not being able to hash snan
(
st.decimals,
dict(),
lambda x: not x.is_snan(),
lambda x: True,
),
# cannot have duplicate nans
(
st.floats,
dict(),
lambda x: True,
lambda container: sum([isnan(item) for item in container]) < 2,
),
)
# todo fix https://github.com/tahini-dev/tahini/runs/1898415631?check_suite_focus=true
@pytest.mark.parametrize('type_index', types_index)
@pytest.mark.parametrize('elements, kwargs_elements, filter_elements, filter_type_index', [
*((item, dict(), lambda x: True, lambda x: True) for item in elements_non_specific),
*elements_specific,
])
@given(data=st.data())
def test_container_data_indexed_init_index_single_elements_type(
type_index,
elements,
kwargs_elements,
filter_elements,
filter_type_index,
data,
):
container = tahini.core.base.ContainerDataIndexed(
index=data.draw(
type_index(
elements=elements(**kwargs_elements).filter(filter_elements),
unique=True,
)
.filter(filter_type_index)
),
)
assert isinstance(container.data_internal, pd.DataFrame)
@pytest.mark.parametrize('type_index', types_index)
@pytest.mark.parametrize('elements', [
pytest.param(
st.timedeltas,
marks=pytest.mark.xfail(
reason='pandas.Timedeltas max and min do not match python standard library datetime.timedelta max and min',
),
),
pytest.param(
st.decimals,
marks=pytest.mark.xfail(
reason='error with decimals not being able to hash snan',
),
),
pytest.param(
st.floats,
marks=pytest.mark.xfail(
reason='error with duplicate nans',
),
),
])
@given(data=st.data())
def test_container_data_indexed_init_index_single_elements_type_x_fail(type_index, elements, data):
container = tahini.core.base.ContainerDataIndexed(
index=data.draw(type_index(elements=elements(), unique=True)),
)
@pytest.fixture(scope='module')
def list_elements():
output_value = (
*(elements() for elements in elements_non_specific),
*(item[0](**item[1]).filter(item[2]) for item in elements_specific),
)
return output_value
@pytest.mark.parametrize('type_index', types_index)
@given(data=st.data())
def test_container_data_indexed_init_index_multiple_elements_type(type_index, list_elements, data):
index = data.draw(
type_index(elements=st.one_of(*list_elements), unique=True)
.filter(lambda x: sum([check_nan(item) for item in x]) < 2)
)
container = tahini.core.base.ContainerDataIndexed(index=index)
assert isinstance(container.data_internal, pd.DataFrame)
@given(data=data_frames(columns=(columns('A', elements=st.integers())), index=indexes(elements=st.integers())))
def test_container_data_indexed_init_data_data_frame(data):
container = tahini.core.base.ContainerDataIndexed(data=data)
assert isinstance(container.data_internal, pd.DataFrame)
@pytest.mark.parametrize('container, expected', [
(tahini.core.base.ContainerDataIndexed(), [names_index_container_data_indexed]),
(tahini.core.base.ContainerDataIndexedMulti(), list(names_index_container_data_indexed_multi)),
(tahini.core.base.ContainerDataIndexedMultiSets(), list(names_index_container_data_indexed_multi)),
])
def test_container_data_indexed_names_index(container, expected):
names_index = container.names_index
assert names_index == expected
@pytest.mark.parametrize('container, data, expected', [
# empty
(tahini.core.base.ContainerDataIndexed(), pd.DataFrame(), get_data_frame()),
# non empty container
(tahini.core.base.ContainerDataIndexed(data=dict(a=['1'])), pd.DataFrame(), get_data_frame()),
# empty container and non empty data
(
tahini.core.base.ContainerDataIndexed(),
pd.DataFrame(data=dict(a=['1'])),
get_data_frame(data=dict(a=['1'])),
),
# non empty container and data
(
tahini.core.base.ContainerDataIndexed(data=dict(a=['1'])),
pd.DataFrame(data=dict(b=[2])),
get_data_frame(data=dict(b=[2])),
),
])
def test_container_data_indexed_data(container, data, expected):
container.data = data
assert_frame_equal(container.data, expected)
@pytest.mark.parametrize('container, expected', [
# empty
(tahini.core.base.ContainerDataIndexed(), get_data_frame_internal()),
# non empty container
(
tahini.core.base.ContainerDataIndexed(data=dict(a=['1'])),
get_data_frame_internal(data=dict(a=['1'])),
),
])
def test_container_data_indexed_data_internal(container, expected):
assert_frame_equal(container.data_internal, expected)
@pytest.mark.parametrize('container, expected', [
# empty
(tahini.core.base.ContainerDataIndexed(), get_data_frame(name_index=name_index_internal)),
# non empty container
(
tahini.core.base.ContainerDataIndexed(data=dict(a=['1'])),
get_data_frame(data=dict(a=['1']), name_index=name_index_internal),
),
])
def test_container_data_indexed_data_testing(container, expected):
assert_frame_equal(container.data_testing, expected)
@pytest.mark.parametrize('container, args, kwargs, type_error, message_error', [
# index not in container
(tahini.core.base.ContainerDataIndexed(), [], dict(index=[0]), KeyError, "[0] not found in axis")
])
def test_container_data_indexed_drop_error(container, args, kwargs, type_error, message_error):
with pytest.raises(type_error) as e:
container.drop(*args, **kwargs)
assert e.value.args[0] == message_error
@pytest.mark.parametrize('container, args, kwargs, expected', [
# empty
(tahini.core.base.ContainerDataIndexed(), [], dict(), tahini.core.base.ContainerDataIndexed()),
# empty container ignore error
(
tahini.core.base.ContainerDataIndexed(),
[],
dict(index=[0], errors='ignore'),
tahini.core.base.ContainerDataIndexed(),
),
# empty inputs
(
tahini.core.base.ContainerDataIndexed(index=[0]),
[],
dict(),
tahini.core.base.ContainerDataIndexed(index=[0]),
),
# non empty
(
tahini.core.base.ContainerDataIndexed(index=[0]),
[],
dict(index=[0]),
tahini.core.base.ContainerDataIndexed(),
),
(
tahini.core.base.ContainerDataIndexed(index=[0, 1]),
[],
dict(index=[0, 1]),
tahini.core.base.ContainerDataIndexed(),
),
(
tahini.core.base.ContainerDataIndexed(index=[0, 1]),
[],
dict(index=[1]),
tahini.core.base.ContainerDataIndexed(index=[0]),
),
# drop columns
(
tahini.core.base.ContainerDataIndexed(index=[0], columns=['a']),
[],
dict(columns=['a']),
tahini.core.base.ContainerDataIndexed(index=[0]),
),
])
def test_container_data_indexed_drop(container, args, kwargs, expected):
container = container.drop(*args, **kwargs)
tahini.testing.testing.assert_container_equal(container, expected)
@pytest.mark.parametrize('container, args, kwargs, expected', [
# empty
(tahini.core.base.ContainerDataIndexed(), [], dict(), tahini.core.base.ContainerDataIndexed()),
# empty column
(
tahini.core.base.ContainerDataIndexed(),
[],
dict(data=pd.DataFrame(data=dict(value=[]))),
tahini.core.base.ContainerDataIndexed(data=dict(value=[])),
),
# empty inputs
(
tahini.core.base.ContainerDataIndexed(index=[0]),
[],
dict(),
tahini.core.base.ContainerDataIndexed(index=[0]),
),
# empty container and non empty index
(
tahini.core.base.ContainerDataIndexed(),
[],
dict(index=[0]),
tahini.core.base.ContainerDataIndexed(index=[0]),
),
# empty container and non empty data
(
tahini.core.base.ContainerDataIndexed(),
[],
dict(data=dict(a=[1])),
tahini.core.base.ContainerDataIndexed(data=dict(a=[1])),
),
# update with no new changes
(
tahini.core.base.ContainerDataIndexed(index=[0]),
[],
dict(index=[0]),
tahini.core.base.ContainerDataIndexed(index=[0]),
),
# update seems to sort
(
tahini.core.base.ContainerDataIndexed(index=[0]),
[],
dict(index=[2, 1]),
tahini.core.base.ContainerDataIndexed(index=[0, 1, 2]),
),
# new column and index
(
tahini.core.base.ContainerDataIndexed(),
[],
dict(data=dict(a=[1, 2])),
tahini.core.base.ContainerDataIndexed(data=dict(a=[1, 2])),
),
# new column for given index
(
tahini.core.base.ContainerDataIndexed(index=[0, 1]),
[],
dict(index=[0, 1], data=dict(a=[1, 2])),
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[1, 2])),
),
# new column and index item
(
tahini.core.base.ContainerDataIndexed(index=[0, 1]),
[],
dict(index=[1, 2], data=dict(a=[1, 2])),
tahini.core.base.ContainerDataIndexed(index=[0, 1, 2], data=dict(a=[nan, 1, 2])),
),
# cannot update to nan with default func
(
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[1, 2])),
[],
dict(index=[1], data=dict(a=[nan])),
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[1, 2])),
),
# single value in column
(
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[1, 2])),
[],
dict(index=[1], data=dict(a=[3])),
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[1, 3])),
),
# single value in column
(
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[1, 2])),
[],
dict(index=[0], data=dict(a=[3])),
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[3, 2])),
),
# new additional column
(
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[1, 2])),
[],
dict(index=[0, 1], data=dict(b=[2, 3])),
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[1, 2], b=[2, 3])),
),
# row update
(
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[1, 2], b=[2, 3])),
[],
dict(index=[0], data=dict(a=[4], b=[5])),
tahini.core.base.ContainerDataIndexed(index=[0, 1], data=dict(a=[4, 2], b=[5, 3])),
),
])
def test_container_data_indexed_update(container, args, kwargs, expected):
container = container.update(*args, **kwargs)
tahini.testing.testing.assert_container_equal(container, expected)
@pytest.mark.parametrize('container, args, kwargs, type_error, message_error', [
# missing map multiple
(
tahini.core.base.ContainerDataIndexed(index=[0, 1]),
[],
dict(mapper={}),
ValueError,
"Index needs to be unique for 'ContainerDataIndexed'",
),
])
def test_container_data_indexed_map_error(container, args, kwargs, type_error, message_error):
with pytest.raises(type_error) as e:
container.map(*args, **kwargs)
assert e.value.args[0] == message_error
@pytest.mark.parametrize('container, args, kwargs, expected', [
# empty
(tahini.core.base.ContainerDataIndexed(), [], dict(), tahini.core.base.ContainerDataIndexed()),
# empty inputs
(
tahini.core.base.ContainerDataIndexed(index=[0]),
[],
dict(),
tahini.core.base.ContainerDataIndexed(index=[0]),
),
# empty container
(
tahini.core.base.ContainerDataIndexed(),
[],
dict(mapper=dict()),
tahini.core.base.ContainerDataIndexed(),
),
(
tahini.core.base.ContainerDataIndexed(),
[],
dict(mapper=dict(a=1)),
tahini.core.base.ContainerDataIndexed(),
),
# non empty
(
tahini.core.base.ContainerDataIndexed(index=[0]),
[],
dict(mapper={0: 1}), tahini.core.base.ContainerDataIndexed(index=[1]),
),
# change index type
(
tahini.core.base.ContainerDataIndexed(index=[0, 1]),
[],
dict(mapper={0: 'a', 1: 'b'}),
tahini.core.base.ContainerDataIndexed(index=['a', 'b']),
),
# missing map
(
tahini.core.base.ContainerDataIndexed(index=[0, 1]),
[],
dict(mapper={0: 'a'}),
tahini.core.base.ContainerDataIndexed(index=['a', nan]),
),
])
def test_container_data_indexed_map(container, args, kwargs, expected):
container = container.map(*args, **kwargs)
tahini.testing.testing.assert_container_equal(container, expected)
@pytest.mark.parametrize('container, expected', [
(tahini.core.base.ContainerDataIndexed(), f'ContainerDataIndexed(index={get_data_frame().index})'),
(
tahini.core.base.ContainerDataIndexedMulti(),
f'ContainerDataIndexedMulti(index={get_data_frame_index_multi().index})',
),
(
tahini.core.base.ContainerDataIndexedMultiSets(),
f'ContainerDataIndexedMultiSets(index={get_data_frame_index_multi().index})',
),
])
def test_container_data_indexed_repr(container, expected):
repr_container = repr(container)
assert repr_container == expected
@pytest.mark.parametrize('container, expected', [
# empty
(tahini.core.base.ContainerDataIndexed(), []),
(tahini.core.base.ContainerDataIndexedMulti(), []),
(tahini.core.base.ContainerDataIndexedMultiSets(), []),
# non empty
(tahini.core.base.ContainerDataIndexed(index=[0]), [0]),
(tahini.core.base.ContainerDataIndexedMulti(index=[(0, 1), (0, 2)]), [(0, 1), (0, 2)]),
(
tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 1), (0, 2)]),
[frozenset((0, 1)), frozenset((0, 2))],
),
])
def test_container_data_indexed_iter(container, expected):
assert [item for item in container.iter()] == expected
assert [item for item in container] == expected
@pytest.mark.parametrize('container, item, expected', [
# not in empty
(tahini.core.base.ContainerDataIndexed(), 0, False),
(tahini.core.base.ContainerDataIndexedMulti(), (0, 1), False),
(tahini.core.base.ContainerDataIndexedMultiSets(), (0, 1), False),
# contains
(tahini.core.base.ContainerDataIndexed(index=[0]), 0, True),
(tahini.core.base.ContainerDataIndexedMulti(index=[(0, 1)]), (0, 1), True),
(tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 1)]), (0, 1), True),
(tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 1)]), (1, 0), True),
# not contains
(tahini.core.base.ContainerDataIndexed(index=[0]), 1, False),
(tahini.core.base.ContainerDataIndexedMulti(index=[(0, 1)]), (0, 2), False),
(tahini.core.base.ContainerDataIndexedMulti(index=[(0, 1)]), (1, 0), False),
(tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 1)]), (0, 2), False),
])
def test_container_data_indexed_contains(container, item, expected):
assert (item in container) == expected
@pytest.mark.parametrize('container, expected', [
# empty
(tahini.core.base.ContainerDataIndexed(), 0),
(tahini.core.base.ContainerDataIndexedMulti(), 0),
(tahini.core.base.ContainerDataIndexedMultiSets(), 0),
# non empty
(tahini.core.base.ContainerDataIndexed(index=[0]), 1),
(tahini.core.base.ContainerDataIndexedMulti(index=[(0, 1)]), 1),
(tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 1)]), 1),
])
def test_container_data_indexed_len(container, expected):
assert len(container) == expected
@pytest.mark.parametrize('container_left, container_right, expected', [
# empty
(tahini.core.base.ContainerDataIndexed(), tahini.core.base.ContainerDataIndexed(), True),
(tahini.core.base.ContainerDataIndexedMulti(), tahini.core.base.ContainerDataIndexedMulti(), True),
(
tahini.core.base.ContainerDataIndexedMultiSets(),
tahini.core.base.ContainerDataIndexedMultiSets(),
True,
),
# non empty
(
tahini.core.base.ContainerDataIndexed(index=[0]),
tahini.core.base.ContainerDataIndexed(index=[0]),
True,
),
(
tahini.core.base.ContainerDataIndexedMulti(index=[(0, 1)]),
tahini.core.base.ContainerDataIndexedMulti(index=[(0, 1)]),
True,
),
(
tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 1)]),
tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 1)]),
True,
),
(
tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 1)]),
tahini.core.base.ContainerDataIndexedMultiSets(index=[(1, 0)]),
True,
),
# empty versus non empty
(tahini.core.base.ContainerDataIndexed(), tahini.core.base.ContainerDataIndexed(index=[0]), False),
# None right
(tahini.core.base.ContainerDataIndexed(), None, False),
# None left
(None, tahini.core.base.ContainerDataIndexed(), False),
# different order
(
tahini.core.base.ContainerDataIndexed(index=[1, 2]),
tahini.core.base.ContainerDataIndexed(index=[2, 1]),
True,
),
(
tahini.core.base.ContainerDataIndexedMulti(index=[(0, 1), (0, 2)]),
tahini.core.base.ContainerDataIndexedMulti(index=[(0, 2), (0, 1)]),
True,
),
(
tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 1), (0, 2)]),
tahini.core.base.ContainerDataIndexedMultiSets(index=[(0, 2), (0, 1)]),
True,
),
])
def test_container_data_indexed_eq(container_left, container_right, expected):
assert (container_left == container_right) == expected
@pytest.mark.parametrize('args, kwargs, expected', [
# empty multi index
([], dict(index=pd.MultiIndex.from_arrays([[]])), pd.Index([])),
# non empty multi index
(
[],
dict(index=pd.MultiIndex.from_tuples([(0, 1)])),
pd.Index([(0, 1)]),
from datetime import datetime
import pandas as pd
from bs4 import BeautifulSoup
import cloudscraper
from datetime import timedelta
class CalendarDataFeed:
def __init__(self, startYear, endYear, calendarSite = "https://www.forexfactory.com/calendar?day=" ):
self.startYear = startYear
self.endYear = endYear
self.calendarSite = calendarSite
self.filename = ''
self.saveLocation = ''
self.loadLocation = ''
self.calName = ''
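        # Maps 12-hour "pm" clock hours to their 24-hour equivalents (e.g. "8" -> "20");
        # used by getDailyEvents when normalising ForexFactory event times.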
self.TimeDictionary = {
"1": "13",
"2": "14",
"3": "15",
"4": "16",
"5": "17",
"6": "18",
"7": "19",
"8": "20",
"9": "21",
"10": "22",
"11": "23",
"12": "12"
}
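        # Month abbreviations used to build the ForexFactory calendar URL,
        # e.g. ".../calendar?day=jan1.2020" for 1 January 2020.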
self.calendarMonths = {
1: "jan",
2: "feb",
3: "mar",
4: "apr",
5: "may",
6: "jun",
7: "jul",
8: "aug",
9: "sept",
10: "oct",
11: "nov",
12: "dec"
}
def getDailyEvents(self, calendarSite, caldate, timeZone ='UTC'):
try:
parseURL = calendarSite + str(self.calendarMonths[caldate.month]) + str(caldate.day) + "." + str(caldate.year)
scraper = cloudscraper.create_scraper() # returns a CloudScraper instance
content = scraper.get(parseURL).text
soup = BeautifulSoup(content, "html.parser")
table = soup.find_all("tr", {"class":"calendar_row"})
forcal = []
for item in table:
dict= {}
dict['Currency'] = item.find_all("td", {"class": "calendar__currency"})[0].text
dict['Event'] = item.find_all("td", {"class": "calendar__event"})[0].text
dict['Time_Eastern'] = item.find_all("td", {"class": "calendar__time"})[0].text
impact = item.find_all("td", {"class": "impact"})
for icon in range(0, len(impact)):
try:
dict['Impact'] = impact[icon].find_all("span")[0]["title"].split(' ', 1)[0]
except:
dict['Impact'] = ''
dict['Actual'] = item.find_all("td", {"class": "calendar__actual"})[0].text
dict['Forecast'] = item.find_all("td", {"class": "calendar__forecast"})[0].text
forcal.append(dict)
df = pd.DataFrame(forcal)
df['Currency'] = df.Currency.str.replace(r'\n', '')
df['Time_Eastern'] = df['Time_Eastern'].fillna("0")
newDayTime = []
for item in range(0,len(df)):
if ('Day' in df.iloc[item][2]):
newDayTime.append(24)
elif ("pm" in df.iloc[item][2]):
hour = df.iloc[item][2].replace("pm", '').replace("am", '').replace(u'\xa0', ' ').strip()
afternoon = hour[0:hour.find(":")]
afternoon = self.TimeDictionary[afternoon]
newTime = afternoon +hour[hour.find(":"):]
newDayTime.append(newTime)
elif ("am" in df.iloc[item][2]):
if (len(df.iloc[item][2].replace("pm", '').replace("am", '')+ ":00") == 7):
temp = "0" + df.iloc[item][2].replace("pm", '').replace("am", '')
newDayTime.append(temp)
else:
newDayTime.append(df.iloc[item][2].replace("pm", '').replace("am", ''))
else:
newDayTime.append("0")
df["Time"] = newDayTime
df["Date"] = str(caldate.year) + "." + str(caldate.month) + "." + str(caldate.day)
df["TimeIndex"] = self.DateConverter(df)
df["Event"] = df["Event"].str.lstrip()
df["Event"] = df["Event"].str.rstrip()
df = df.drop(['Time_Eastern', 'Impact', 'Forecast'], axis = 1)
return df
except Exception as e:
print('Error updating economic calendar Error could be: ', str(e))
    def getDaySeveralCalender(self, urls, caldate, timeZone ='UTC'):
        try:
            # Accumulate the events from every calendar site instead of keeping only the last one.
            result = pd.DataFrame()
            for site in urls:
                frame = self.getDailyEvents(site, caldate, timeZone=timeZone)
                result = pd.concat([result, frame])
            result = result.sort_values(by='Time', ascending=True)
            result = result.reset_index(drop=True)
            return result
        except Exception as e:
            print('Error updating economic calendar Error could be: ', str(e))
def saveCalendar(self, data, saveLocation, filename):
try:
self.saveLocation = saveLocation
self.filename = filename
data.to_csv(self.saveLocation + self.filename + ".csv", index = True)
print('Saving complete...')
except:
print('Error saving file')
def loadhistCalendar(self, loadLocation, calName):
try:
self.loadLocation = loadLocation
self.calName = calName
histcal = pd.read_csv(self.loadLocation + self.calName + '.csv')
return histcal
except Exception as e:
print('Error loading economic calendar Error could be: ', str(e))
def loadhistCalendarTimeIndexVersion(self, loadLocation, calName, timeZone ='UTC'):
try:
self.loadLocation = loadLocation
self.calName = calName
histcal = pd.read_csv(self.loadLocation + self.calName + '.csv')
histcal["TimeIndex"] = self.DateConverter(histcal)
histcal = histcal[['Date',
'Time',
'Currency',
'Event',
'Impact',
'Actual',
'Forecast',
'Previous',
'TimeIndex']]
histcal["Date"] = [ item.date() for item in histcal["TimeIndex"]]
return histcal
except Exception as e:
print('Error loading economic calendar Error could be: ', str(e))
def createFullDay(self, frames):
result = pd.concat(frames)
todaysEvents = result.sort_values(by='Time', ascending=True)
return todaysEvents
    def currencyPairs(self, data, curr1 = 'EUR', curr2 = 'CHF'):
        # Keep events for either leg of the pair; '&' on the same column can never match both currencies at once.
        currPair = data[(data["Currency"] == curr1) |
                        (data["Currency"] == curr2)]
        return currPair
    def downloadCalendar(self, calendarSite, startDate, endDate, timeZone ='UTC'):
        try:
            result = pd.DataFrame()
            for caldate in self.daterange(startDate, endDate):
                currentDay = self.getDailyEvents(calendarSite, caldate, timeZone=timeZone)
                result = pd.concat([result, currentDay])
            return result
        except Exception as e:
            print('Error downloading economic calendar Error could be: ', str(e))
    def eventInFuture(self, calendar, priceFeedTime, delta, timeIndex = "TimeIndex"):
        newTime = priceFeedTime + timedelta(minutes=delta)
        events = calendar[calendar[timeIndex] == newTime]["Event"]
        # Only report events when at least one non-null event is scheduled at newTime.
        if not events.dropna().empty:
            if len(events) > 1:
                return events
            else:
                return events.iloc[0]
'''END Corefunctions'''
'''Start Helperfunction'''
def DateConverter(self, data, dateColumn = "Date", dayTimeColumn = "Time", timeZone ='UTC'):
try:
formTime = '%Y.%m.%d %H:%M'
            tempDF = pd.DataFrame()
from rpy2.robjects import pandas2ri
import numpy as np
import pandas as pd
import wrfpywind.data_preprocess as pp
import xarray as xr
from .util import _get_r_module, _attach_obs, _xr2pd, _fxda, _fxda_grid
def fmt_training_data(wrfda, obsda):
# Get and format data for only north buoy at 100m
data_n = _attach_obs(wrfda, obsda, location='north', height=100)
    # Convert the xr.DataArray into a pd.DataFrame & remove NaNs
data_n = _xr2pd(data_n)
# Get and format data for only south buoy at 100m
data_s = _attach_obs(wrfda, obsda, location='south', height=100)
    # Convert the xr.DataArray into a pd.DataFrame & remove NaNs
data_s = _xr2pd(data_s)
# Combine the data from the two buoys into the same dataframe
data = pd.concat([data_s, data_n], axis=0)
    # Reset the index and drop it (it is just the repeating sequence)
data = data.reset_index(drop=True)
return data
def fmt_test_data(wrfda, obsda):
# We need an observation as a placeholder (it won't actually be used),
# so we'll use the one from the north buoy
data = _attach_obs(wrfda, obsda, location='north', height=100)
# Covert the xr.DataArray into a pd.DataFrame & remove NaNs
data = _xr2pd(data, drop_na=False)
return data
def fmt_grid_data(wrfda):
    # Now, extract the full grid for a single time slice so that each ensemble member still contains its own column
# Note -- should still format the time variable here
# Get the latitude and longitude variables
XLAT = wrfda.to_dataframe().XLAT.unstack(0).iloc[:,0].rename('XLAT')
XLONG = wrfda.to_dataframe().XLONG.unstack(0).iloc[:,0].rename('XLONG')
# Unstack the "model" index
wspdgrid_unstacked = wrfda.to_dataframe().wspd_wrf.unstack(0)
# Concat the latitude and longitude variables onto the raw member wind speed forecasts.
    # The code allows for the inclusion of a "Time" index (corresponding to one datetime) or not.
try:
wspdgrid = pd.concat([wspdgrid_unstacked, XLAT, XLONG], axis=1).reset_index(['south_north', 'west_east'], drop=True).reset_index('Time')
except KeyError:
wspdgrid = pd.concat([wspdgrid_unstacked, XLAT, XLONG], axis=1).reset_index(['south_north', 'west_east'], drop=True)
return wspdgrid, wspdgrid_unstacked
def get_fmt_df(obs, start_date, end_date, datadir='../data/', type='train'):
    # Open the xarray Dataset containing wind speed data for the entire domain
# note that you must use a `Dataset` object for the `extract_buoy_da` function to work.
ensds = xr.open_dataset(f"{datadir}ensds_{start_date.strftime('%Y%m%d')}-{end_date.strftime('%d')}.nc")
# Get data only at the buoy locations
ensda = pp.extract_buoy_da(ensds, varname='wspd_wrf', locations=['south', 'north'])
# Combine ensemble data and training or test data into a pd.DataFrame in the correct format
if type == 'train':
fmt_df = fmt_training_data(ensda, obs)
elif type == 'test':
fmt_df = fmt_test_data(ensda, obs)
else:
print(f'{type} is invalid.')
raise ValueError
return fmt_df
def get_bma_fit(train_data, gamma_bma=None):
"""
Wrapper function for the R fit_bma function in the gamma_bma module
"""
# Activate pandas2ri
pandas2ri.activate()
if gamma_bma is None:
# Read the R gamma_bma module into Python
gamma_bma = _get_r_module('../R/gamma_bma.r', 'gamma_bma')
# Fit the BMA model
fit = gamma_bma.fit_bma(train_data, n_ens_members=5)
return fit
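# Hedged usage sketch (added for illustration; variable names are assumptions):
#     train_df = get_fmt_df(obs, start_date, end_date, type='train')
#     fit = get_bma_fit(train_df)
# get_bma_fit expects the R module to expose fit_bma(data, n_ens_members), as loaded above.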
def read_fmt_fit_bma(t_init, obs, n_days=2, sim_len=4, datadir='../data/'):
"""
    Read and format the ensemble data for the ``n_days`` training days preceding ``t_init``, then fit a BMA model to it.
"""
# Convert the initialization time to a Timestamp if it's not given as one
if type(t_init) != pd.Timestamp:
t_init = pd.to_datetime(t_init)
# Find the first training day
d1_training = t_init - pd.DateOffset(days=n_days)
# Specify the start dates
start_dates = pd.date_range(d1_training, periods=n_days)
    # Specify the end dates by setting how long these simulations should last
    end_dates = start_dates + pd.DateOffset(days=sim_len)
from django.http import JsonResponse
from collections import Counter
import pandas as pd
import json
from datetime import date, timedelta
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse
from django.db.models import Avg, Sum, Count
from django.shortcuts import render
from django.views.generic import TemplateView
from relations.models import *
from entities.models import *
def make_href(row, entity='work', id='id', label=None):
url = reverse(
'entities:generic_entities_detail_view',
kwargs={'pk': row[id], 'entity': entity}
)
if label:
element = """<a href="{}" target='_blank'>{}</a>""".format(url, row[label])
else:
element = """<a href="{}" target='_blank'>{}</a>""".format(url, 'link2object')
return element
def calculate_duration(row):
if row['end_date'] and row['start_date']:
time = pd.to_timedelta(
(row['end_date']-row['start_date']) + timedelta(days=1)
).__str__()
else:
        time = pd.to_timedelta("0 days")
"""
OneSeries is an extended variant of pandas.Seres, which also inherits all the pandas.Series
features and ready to use. It contains many useful methods for a better experience on data analysis.
WARNING: Because this module is still pre-alpha, so many features are unstable.
"""
import pandas as pd
from pandas import Series
from pandas import DataFrame
import matplotlib.pyplot as plt
# oneline module
from .plot import Plot
from ..tools.compat import import_optional_dependency
class OneSeries(Series, Plot):
def __init__(self, data):
"""
Inherit from pandas.Series.
        :param data: the input data
"""
super().__init__(data=data)
@property
def _one_data(self):
from .onedata import OneData
return OneData
def r_append(self, other):
"""
Append another OneSeries data to the right of this series and return a new OneData.
:param other: the other OneSeries
:return: OneData
"""
if isinstance(other, OneSeries):
return self._one_data({self.name: self, other.name: other})
elif isinstance(other, self._one_data) or isinstance(other, DataFrame):
            return self._one_data(pd.concat([self, other], axis=1))
from ...utils import constants
import pandas as pd
import geopandas as gpd
import numpy as np
import shapely
import pytest
from contextlib import ExitStack
from sklearn.metrics import mean_absolute_error
from ...models.geosim import GeoSim
from ...core.trajectorydataframe import TrajDataFrame
def global_variables():
# tessellation
tess_polygons = [[[7.481, 45.184],
[7.481, 45.216],
[7.526, 45.216],
[7.526, 45.184],
[7.481, 45.184]],
[[7.481, 45.216],
[7.481, 45.247],
[7.526, 45.247],
[7.526, 45.216],
[7.481, 45.216]],
[[7.526, 45.184],
[7.526, 45.216],
[7.571, 45.216],
[7.571, 45.184],
[7.526, 45.184]],
[[7.526, 45.216],
[7.526, 45.247],
[7.571, 45.247],
[7.571, 45.216],
[7.526, 45.216]]]
geom = [shapely.geometry.Polygon(p) for p in tess_polygons]
tessellation = gpd.GeoDataFrame(geometry=geom, crs="EPSG:4326")
tessellation = tessellation.reset_index().rename(columns={"index": constants.TILE_ID})
social_graph = [[0,1],[0,2],[0,3],[1,3],[2,4]]
return tessellation, social_graph
tessellation, social_graph = global_variables()
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', [social_graph, 'random'])
@pytest.mark.parametrize('n_agents', [1,5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
# First test set: CORRECT arguments, no ERRORS expected (#test: 4)
def test_geosim_generate_success(start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
assert isinstance(tdf, TrajDataFrame)
# Second test set: WRONG arguments, expected to FAIL
# test 2.1: wrong n_agents (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [-2,-1,0])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_geosim_wrong_n_agents(start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
# test 2.2: end_date prior to start_date (#test: 1)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_geosim_wrong_dates(start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
# test 2.3: wrong type for the spatial_tessellation (#test: 5)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', ["", None, [], "tessellation", [1,2,3]])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=TypeError)
def test_geosim_wrong_tex_type(start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
# test 2.4: #of tiles in spatial_tessellation < 2 (#test: 2)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [pd.DataFrame(),tessellation[:1]])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_geosim_wrong_tiles_num(start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
# test 2.5: wrong social_graph type (#test: 3)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', [None, False, 24])
@pytest.mark.parametrize('n_agents', [1,5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=TypeError)
def test_geosim_wrong_social_graph_type(start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
# test 2.5: correct social_graph type with wrong value (#test: 2)
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', ['xyz', []])
@pytest.mark.parametrize('n_agents', [1,5])
@pytest.mark.parametrize('random_state', [2])
@pytest.mark.parametrize('show_progress', [True])
@pytest.mark.xfail(raises=ValueError)
def test_geosim_wrong_social_graph_value(start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
# Third test set: assert the correctness of the model's functions
def all_equal(a, b, threshold=1e-3):
return mean_absolute_error(a, b) <= threshold
#a is the location_vector, and b represents che generated choices
def correcteness_set_exp(a,b):
for i in range(len(b)):
if a[i]>0 and b[i]>0:
return False
return True
def correcteness_set_ret(a,b):
for i in range(len(b)):
if b[i]>0 and a[i]==0:
return False
return True
def correcteness_set_exp_social(lva,lvc,choices):
for i in range(len(choices)):
if choices[i]>0:
if not (lva[i]==0 and lvc[i]>0):
return False
return True
def correcteness_set_ret_social(lva,lvc,choices):
for i in range(len(choices)):
if choices[i]>0:
if not (lva[i]>0 and lvc[i]>0):
return False
return True
# test 3.1: correct random_weighted_choice (#test: 1)
@pytest.mark.parametrize('size', [1000])
@pytest.mark.parametrize('n_picks', [int(1e4)])
def test_weighted_random_choice(size,n_picks):
np.random.seed(24)
geosim = GeoSim()
weigths = np.random.randint(0, 10, size=size)
theoretical = weigths/np.sum(weigths)
empirical = [0]*len(weigths)
for j in range(n_picks):
i = geosim.random_weighted_choice(weigths)
empirical[i]+=1
empirical = empirical/np.sum(empirical)
assert(all_equal(theoretical,empirical))
# test 3.2: correct exploration choices (#test: 1)
# create a fake location vector of size n for the agent A (id=0)
# m elements = 0 and j elements > 0, m+j=n
# EXPLORATION (in GeoSim uniformly at random)
@pytest.mark.parametrize('m', [100])
@pytest.mark.parametrize('j', [500])
@pytest.mark.parametrize('n_picks', [int(1e4)])
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [2])
@pytest.mark.parametrize('random_state', [24])
@pytest.mark.parametrize('show_progress', [True])
def test_correctness_exp(m, j, n_picks, start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
np.random.seed(random_state)
# create a fake location vector of size n for the agent A (id=0)
# m elements = 0 and j elements > 0, m+j=n
location_vector = [0]*m + list(np.random.randint(5, 10, size=j))
choices = [0]*len(location_vector)
np.random.shuffle(location_vector)
#assign this location vector to agent with id=0
geosim.agents[0]['location_vector']=np.array(location_vector)
for j in range(n_picks):
location_id = geosim.make_individual_exploration_action(0)
choices[location_id]+=1
#test 1 correctness of the choices; i.e., no location j s.t. lv[j]>0
res_1 = correcteness_set_exp(location_vector,choices)
#test 2 correct probabilities
empirical = choices/np.sum(choices)
theoretical = [1/m if location_vector[i]==0 else 0 for i in range(len(location_vector))]
res_2 = all_equal(theoretical,empirical)
assert((res_1,res_2)==(True,True))
# test 3.3: correct return choices (#test: 1)
# create a fake location vector of size n for the agent A (id=0)
# m elements = 0 and j elements > 0, m+j=n
# RETURN (prop. to number of visits)
@pytest.mark.parametrize('m', [100])
@pytest.mark.parametrize('j', [500])
@pytest.mark.parametrize('n_picks', [int(1e4)])
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', ['random'])
@pytest.mark.parametrize('n_agents', [2])
@pytest.mark.parametrize('random_state', [24])
@pytest.mark.parametrize('show_progress', [True])
def test_correctness_ret(m, j, n_picks, start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
np.random.seed(random_state)
# create a fake location vector of size n for the agent A (id=0)
# m elements = 0 and j elements > 0, m+j=n
location_vector = [0]*m + list(np.random.randint(5, 10, size=j))
choices = [0]*len(location_vector)
np.random.shuffle(location_vector)
#assign this location vector to agent with id=0
geosim.agents[0]['location_vector']=np.array(location_vector)
for j in range(n_picks):
location_id = geosim.make_individual_return_action(0)
choices[location_id]+=1
#test 1 correctness of the choices; i.e., no location j s.t. lv[j]=0
res_1 = correcteness_set_ret(location_vector,choices)
#test 2 correct probabilities
empirical = choices/np.sum(choices)
theoretical = location_vector/np.sum(location_vector)
res_2 = all_equal(theoretical,empirical)
assert((res_1,res_2)==(True,True))
# test 3.4: correct social exploration choices (#test: 1)
# create a fake location vector of size n for the agent A (id=0) and agent C (id=1)
# agent A and C are connected in the social graph
# m elements = 0 and j elements > 0, m+j=n
# SOCIAL EXPLORATION (prop. to number of visits of a social contact)
@pytest.mark.parametrize('m', [100])
@pytest.mark.parametrize('j', [500])
@pytest.mark.parametrize('n_picks', [int(1e4)])
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
@pytest.mark.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
@pytest.mark.parametrize('spatial_tessellation', [tessellation])
@pytest.mark.parametrize('social_graph', [[(1,2)]])
@pytest.mark.parametrize('n_agents', [2])
@pytest.mark.parametrize('random_state', [24])
@pytest.mark.parametrize('show_progress', [True])
def test_correctness_exp_social(m, j, n_picks, start_date, end_date, spatial_tessellation,
social_graph, n_agents, random_state, show_progress):
geosim = GeoSim()
tdf = geosim.generate(start_date, end_date, social_graph=social_graph,
spatial_tessellation=spatial_tessellation,
n_agents = n_agents, random_state=random_state,
show_progress=show_progress)
# agent A (id=0)
location_vector_a = [0]*(m-2) + list(np.random.randint(5, 10, size=j))
np.random.shuffle(location_vector_a)
location_vector_a = location_vector_a + [0]*2
choices = [0]*len(location_vector_a)
#assign this location vector to agent with id=0
geosim.agents[0]['location_vector']=np.array(location_vector_a)
# agent C (id=1)
location_vector_c = [0]*(m) + list(np.random.randint(5, 10, size=j-2))
np.random.shuffle(location_vector_c)
location_vector_c = location_vector_c + [5,3]
#assign this location vector to agent with id=1
geosim.agents[1]['location_vector']=np.array(location_vector_c)
for j in range(n_picks):
location_id = geosim.make_social_action(0, 'exploration')
choices[location_id]+=1
#test 1 correctness of the choices;
res_1 = correcteness_set_exp_social(location_vector_a,location_vector_c,choices)
#test 2 correct probabilities
empirical = choices/np.sum(choices)
set_c = [location_vector_c[i] if (location_vector_a[i]==0 and location_vector_c[i]>0) else 0 for
i in range(len(location_vector_a))]
theoretical = set_c/np.sum(set_c)
res_2 = all_equal(theoretical,empirical)
assert((res_1,res_2)==(True,True))
# test 3.5: correct social return choices (#test: 1)
# create a fake location vector of size n for the agent A (id=0) and agent C (id=1)
# agent A and C are connected in the social graph
# m elements = 0 and j elements > 0, m+j=n
# SOCIAL RETURN (prop. to number of visits of a social contact)
@pytest.mark.parametrize('m', [100])
@pytest.mark.parametrize('j', [500])
@pytest.mark.parametrize('n_picks', [int(1e4)])
@pytest.mark.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from datetime import datetime
from numpy.linalg import norm
from tqdm.auto import tqdm
from glob import glob
import pandas as pd
import numpy as np
import subprocess
import sys
import os
import nltk
import re
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R'):
return wordnet.ADV
else:
return None
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
lemma = WordNetLemmatizer()
stopwords = stopwords.words('english')
def clean_text(text):
text = str(text).lower()
# Remove urls
text = re.sub('http[s]?://\S+', '', text)
# remove emojis
text = emoji_pattern.sub(r'', text)
token_words = word_tokenize(text)
pos_tags = nltk.pos_tag(token_words)
stem_text=[]
for token,pos in pos_tags:
token = re.sub("[>@,^\.!?']", '', token)
if token not in stopwords and (len(token) > 3 or token=="joe" or token=="amy" or token=="aoc"):
pos_tag = get_wordnet_pos(pos)
token = lemma.lemmatize(token,pos=pos_tag) if pos_tag else token
stem_text.append(token)
return stem_text
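# Hedged example (added for illustration; the exact lemmas depend on the locally
# installed NLTK corpora, so the output shown is an assumption):
#     clean_text("Joe is running for president! https://example.com")
#     -> ['joe', 'run', 'president']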
cos_sim = lambda a,b : np.dot(a, b)/(norm(a)*norm(b))
cos_dist = lambda a,b : 1 - cos_sim(a,b)
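# cos_sim is the cosine similarity of two vectors; cos_dist = 1 - cos_sim,
# i.e. 0 for identically oriented vectors and 2 for exactly opposite ones.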
CANDIDATE_SUBS = ["JoeBiden","SandersForPresident","BaemyKlobaechar","ElizabethWarren","Pete_Buttigieg","YangForPresidentHQ"]
def generate_embedding(time_frame=None,**arg_dict):
output = "./trained_embeddings/vecs_{p1}_{p2}.txt".format(**arg_dict)
if time_frame:
subprocess.run("mkdir -p trained_embeddings/temporal/{}".format(time_frame), shell=True)
output = "./trained_embeddings/temporal/{}/{}_vecs_{p1}_{p2}.txt".format(time_frame,time_frame,**arg_dict)
command = "./word2vecf/word2vecfadapted -output {} -train {file_data} -wvocab {file_wv} -cvocab {file_cv} -threads 150 -alpha {alpha} -size {size} -{param1} {p1} -{param2} {p2}".format(output,**arg_dict)
if not os.path.exists(output):
print("\t * Starting {}".format(output))
subprocess.run(command, shell=True)
return output
def load_embedding(filepath,split=True, **kwargs):
embedding = pd.read_csv(filepath, sep=' ', header=None, skiprows=1, **kwargs)
    embedding = embedding.rename(columns={0: 'subreddit'})
    subreddits, vectors = embedding.iloc[:, 0], embedding.iloc[:, 1:151]
vectors = vectors.divide(np.linalg.norm(vectors, axis=1), axis=0)
if split:
return subreddits, vectors
    embedding = pd.concat([subreddits, vectors], axis=1)
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
from pathlib import Path
from argparse import ArgumentParser
from tempfile import TemporaryDirectory
from typing import Any, Dict, List, Set
from pandas import DataFrame, concat
from lib.io import read_file, display_progress
from lib.net import download
from lib.pipeline import DataPipeline
from lib.utils import URL_OUTPUTS_PROD
def compare_sets(curr: Set[str], prod: Set[str]) -> List[str]:
"""
Compares two sets of values and returns a list of items with "+" or "-" prefix indicating
whether the value has been added or is missing in the second set compared to the first.
Args:
curr: Set of new data
prod: Set of previous data
Return:
List[str]: List of differences
"""
diff_list: List[str] = []
for val in sorted(prod - curr):
diff_list += [f"-{val}"]
for val in sorted(curr - prod):
diff_list += [f"+{val}"]
return diff_list
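# Hedged example (added for illustration): compare_sets({"a", "c"}, {"a", "b"})
# returns ["-b", "+c"] -- values missing from curr get a "-" prefix, new ones a "+".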
def compare_tables(table_curr: DataFrame, table_prod: DataFrame) -> Dict[str, Any]:
cmp_out: Dict[str, Any] = {}
# Make a copy of the tables to avoid modification
table_curr = table_curr.copy()
table_prod = table_prod.copy()
# Compare the number of records
cmp_out["records"] = f"{len(table_curr) - len(table_prod):+d}"
# Compare the columns
cmp_out["columns"] = compare_sets(set(table_curr.columns), set(table_prod.columns))
# Create a single, indexable column
idx_cols = ["key"] + (["date"] if "date" in table_curr.columns else [])
curr_idx, prod_idx = table_curr[idx_cols[0]], table_prod[idx_cols[0]]
for col in idx_cols[1:]:
curr_idx = curr_idx + " " + table_curr[col]
prod_idx = prod_idx + " " + table_prod[col]
# Compare the sets of indices
cmp_out["indices"] = compare_sets(set(curr_idx), set(prod_idx))
# Compare the shared indices
table_curr["_cmp_idx"] = curr_idx
table_prod["_cmp_idx"] = prod_idx
shared_df = concat([table_curr, table_prod]).drop_duplicates(keep=False)
cmp_out["modifications"] = shared_df["_cmp_idx"].values.tolist()
return cmp_out
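# Note (added): compare_tables returns a dict of the form
# {"records": "<signed row-count diff>", "columns": [...], "indices": [...], "modifications": [...]},
# where the two middle lists follow the +/- convention of compare_sets and
# "modifications" lists the combined key/date strings of rows that changed.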
if __name__ == "__main__":
# Process command-line arguments
argparser = ArgumentParser()
argparser.add_argument("--path", type=str, default=None)
argparser.add_argument("--table", type=str, default=None)
argparser.add_argument("--output", type=str, default=None)
argparser.add_argument("--progress", action="store_true")
args = argparser.parse_args()
assert args.path is not None or args.table is not None, "Either --path or --table must be given"
# Manage whether to show progress with our handy context manager
with display_progress(args.progress):
# Derive the name of the table from either provided table name or its path
table_name = args.table or Path(args.path).stem
# If we have a path, we can just read the table from there
if args.path:
table_curr = read_file(args.path)
# If there is no local path for the table, we have to produce the table ourselves
else:
with TemporaryDirectory() as output_folder:
output_folder = Path(output_folder)
pipeline_name = table_name.replace("-", "_")
data_pipeline = DataPipeline.load(pipeline_name)
(output_folder / "snapshot").mkdir()
(output_folder / "intermediate").mkdir()
table_curr = data_pipeline.run(pipeline_name, output_folder)
# Download the table from the production server if it exists
try:
table_prod = read_file(f"{URL_OUTPUTS_PROD}/{table_name}.csv")
except:
            table_prod = DataFrame(columns=table_curr.columns)
import numpy as np
from RecSearch.DataInterfaces.Recommenders.Abstract import IMatrixRankRecommender
from itertools import combinations, permutations
import pandas as pd
class IXCourseDiffRankRecommend(IMatrixRankRecommender):
def iget_recommendation(self, who: dict, possible: pd.DataFrame, n_column: str, ir_column: str, xi: float, tol: float, max_iter: int, filter_name: str = None) -> list:
f = self.get_filter(who, filter_name)
df = self.get_reduced_df(who, possible, n_column)
df[ir_column] = df[ir_column].apply(lambda x: sorted(
[(k, v) for k, v in x.items() if k not in f],
key=lambda y: y[0]))
# Get 2-Combos of student courses for pairwise comparison
df[ir_column] = df[ir_column].apply(lambda x: list(combinations(x, 2)))
# Format as (Course 1, Course 2, Course 1 Grade - Course 2 Grade)
df[ir_column] = df[ir_column].apply(lambda x:
[(y[0][0], y[1][0], y[0][1]-y[1][1]) for y in x if len(y) > 0])
# Remove Empty Course-Ratings
df = df[df[ir_column].apply(lambda x: len(x) > 0)]
if df.empty:
return []
new_df = pd.DataFrame(list(df[ir_column].explode()), columns=['Course1', 'Course2', 'Diff'])
new_df = new_df.dropna(subset=['Course1', 'Course2'])
# Count Course 1 better than Course 2
pos_df = new_df.groupby(['Course1', 'Course2'])['Diff'].agg(lambda x: sum(x > 0)).reset_index()
# Count Course 1 worse than Course 2
neg_df = new_df.groupby(['Course1', 'Course2'])['Diff'].agg(lambda x: sum(x < 0)).reset_index()
# Get Course1 Union Course2 for index/columns to make square matrix
all_courses = sorted(list(set(new_df['Course1'].unique()).union(set(new_df['Course2'].unique()))))
# Create dummy data frame with all 2-permutations of index (square matrix)
distance_df = pd.DataFrame(permutations(all_courses, r=2), columns=['Course1', 'Course2'])
# Merge and pivot (courses are sorted, idx=Course1)
pos_distance_df = pd.merge(distance_df, pos_df, how='left', on=['Course1', 'Course2']).fillna(0)
positive_matrix = pos_distance_df.pivot(index=['Course1'], columns=['Course2'], values=['Diff'])
        # Merge and pivot (courses are sorted, idx=Course2)
        neg_distance_df = pd.merge(distance_df, neg_df, how='left', on=['Course1', 'Course2'])
# Runs after normalization and per_person_ratio_and_factor and pre_plot_aggregation.
import shutil
from pathlib import Path
import itertools
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import collections
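# For each demographic slice, PlotWithSlices writes the per-group mean deltaF1/deltaF2
# to CSV and saves one bar chart per (data_name, formant, group) combination.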
def PlotWithSlices(df, data_name, output_dir):
for group_name in ['Gender', 'AgeGroup', 'Family1', 'Family2', 'Family3', 'Family4', 'Education1', 'Career1', 'Career2', 'Language1']:
        grouped_df = df.groupby([group_name])[['deltaF1', 'deltaF2']].mean()
grouped_df.to_csv(output_dir / (data_name + '_' + group_name + '_raw.csv'), index=True)
for formant in ['deltaF1', 'deltaF2']:
x = []
y = []
full_group_name = '@'.join([data_name, formant, group_name])
for _, row in grouped_df.iterrows():
x.append(group_name + '=' +str(row.name))
y.append(row[formant])
plt.figure(figsize=(10, 6))
plt.bar(x, y)
plt.title(full_group_name)
plt.savefig(output_dir / (full_group_name + '.png'), bbox_inches="tight")
plt.clf()
plt.cla()
plt.close()
def SlicePlotData(df, output_dir):
matched_rows = []
sa_a1_sb_a1 = df[df['IsSba2']=='No']
sa_a1_sb_a1.to_csv(output_dir / 'sa_a1_sb_a1_raw.csv', index=False)
    sa_a1_sb_a1_mean = sa_a1_sb_a1.groupby(['Pos'])[['deltaF1', 'deltaF2']].mean()
sa_a1_sb_a1_mean.to_csv(output_dir / 'sa_a1_sb_a1_mean.csv', index=True)
sa_a1_sb_a2 = df[df['IsSba2']=='Yes']
sa_a1_sb_a2.to_csv(output_dir / 'sa_a1_sb_a2_raw.csv', index=False)
    sa_a1_sb_a2_mean = sa_a1_sb_a2.groupby(['Pos'])[['deltaF1', 'deltaF2']].mean()
sa_a1_sb_a2_mean.to_csv(output_dir / 'sa_a1_sb_a2_mean.csv', index=True)
matched_rows = []
for _, row in df.iterrows():
comps = row['Filename'].split('_')
lang = comps[0]
pos = comps[4]
if lang == 'S' and pos == 'b' and row['Annotation'] == 'a2':
matched_rows.append(row)
input_df = pd.DataFrame(matched_rows)
PlotWithSlices(input_df, 'all_s_sb_a2', output_dir)
matched_rows = []
for _, row in df.iterrows():
comps = row['Filename'].split('_')
lang = comps[0]
pos = comps[4]
if lang == 'M' and pos == 'b' and row['Annotation'] == 'a2':
matched_rows.append(row)
input_df = pd.DataFrame(matched_rows)
PlotWithSlices(input_df, 'all_m_mb_a2', output_dir)
input_base_dir = Path('./analysis/output/')
output_base_dir = Path('./analysis/output/delta')
shutil.rmtree(output_base_dir, ignore_errors=True)
output_base_dir.mkdir(parents=True, exist_ok=True)
df = pd.read_csv(input_base_dir / 'S_all_plot_raw_data.csv')