# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
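# A usage sketch (illustrative only, not part of the public API): these settings
# can be applied directly via ``matplotlib.rcParams.update(mpl_stylesheet)``.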
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
        # check whether the string can be converted to a single color
        maybe_single_color = _maybe_valid_colors([colors])
        # check whether each character can be converted to a color
        maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
            # mpl will raise an error if any of them is invalid
pass
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
class _Options(dict):
"""
Stores pandas plotting options.
    Allows for parameter aliasing so you can just use parameter names that are
    the same as the plot function parameters, but it is stored in a canonical
    format that makes it easy to break down into groups later
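    Examples
    --------
    A minimal usage sketch (``plot_params`` is the module-level instance
    defined below):
    >>> plot_params['x_compat'] = True   # 'x_compat' is an alias
    >>> plot_params['xaxis.compat']
    True
    >>> plot_params['x_compat'] = False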
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
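        Examples
        --------
        A minimal sketch (``df`` is assumed to be an existing DataFrame):
        >>> with plot_params.use('x_compat', True):
        ...     df.plot()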
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
        boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
            if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
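    Examples
    --------
    A minimal sketch, mirroring the ``parallel_coordinates`` example below:
    >>> from pandas import read_csv
    >>> from pandas.tools.plotting import radviz
    >>> from matplotlib import pyplot as plt
    >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
    >>> radviz(df, 'Name')
    >>> plt.show()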
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
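    Examples
    --------
    A minimal sketch (same iris data as the ``parallel_coordinates``
    example below):
    >>> from pandas import read_csv
    >>> from pandas.tools.plotting import andrews_curves
    >>> from matplotlib import pyplot as plt
    >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
    >>> andrews_curves(df, 'Name')
    >>> plt.show()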
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
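    Examples
    --------
    A minimal sketch with synthetic data:
    >>> import numpy as np
    >>> from pandas import Series
    >>> from pandas.tools.plotting import bootstrap_plot
    >>> s = Series(np.random.randn(500))
    >>> fig = bootstrap_plot(s, size=50, samples=500)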
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
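    Examples
    --------
    A minimal sketch with synthetic data:
    >>> import numpy as np
    >>> from pandas import Series
    >>> from pandas.tools.plotting import lag_plot
    >>> s = Series(np.random.randn(200))
    >>> ax = lag_plot(s, lag=1)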
"""
import matplotlib.pyplot as plt
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
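    Examples
    --------
    A minimal sketch with synthetic data:
    >>> import numpy as np
    >>> from pandas import Series
    >>> from pandas.tools.plotting import autocorrelation_plot
    >>> s = Series(np.random.randn(200))
    >>> ax = autocorrelation_plot(s)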
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
class MPLPlot(object):
"""
    Base class for assembling a pandas plot using matplotlib
    Parameters
    ----------
    data : Series or DataFrame
        The data to be plotted
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
            warnings.warn(("'colors' is being deprecated. Please use 'color' "
                           "instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
            # if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
            # if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
        if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
            if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
                if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
                Matplotlib supports numeric values or datetime objects as
                x-axis values. We take the LBYL approach here: by the time
                matplotlib raises an exception for non-numeric/non-datetime
                x-axis values, plt has already taken several actions.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
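        A minimal sketch of the user-facing forms this feeds on (via the
        ``xerr``/``yerr`` plot keywords; ``df`` is assumed to have columns
        'x' and 'err' of length 3):
        >>> df.plot(y='x', yerr='err')            # column name
        >>> df.plot(y='x', yerr=df['err'])        # Series
        >>> df.plot(y='x', yerr=[0.1, 0.2, 0.3])  # raw values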
'''
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
            raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
            raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(self.data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = self.data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _use_dynamic_x(self):
freq = self._index_freq()
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
return (freq is not None) and self._is_dynamic_freq(freq)
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _make_plot(self):
self._initialize_prior(len(self.data))
if self._is_ts_plot():
data = self._maybe_convert_index(self.data)
x = data.index # dummy, not used
plotf = self._get_ts_plot_function()
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
it = self._iter_data()
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
def _get_stacked_values(self, y, label):
if self.stacked:
if (y >= 0).all():
return self._pos_prior + y
elif (y <= 0).all():
return self._neg_prior + y
else:
                raise ValueError('When stacked is True, each column must be '
                                 'either all positive or all negative. '
                                 '{0} contains both positive and negative '
                                 'values'.format(label))
else:
return y
def _get_plot_function(self):
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
            # column_num is used to get the target column from plotf
            # in line and area plots
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
self._update_prior(y)
return lines
return plotf
def _get_ts_plot_function(self):
from pandas.tseries.plotting import tsplot
plotf = self._get_plot_function()
def _plot(ax, x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
lines = tsplot(data, plotf, ax=ax, style=style, **kwds)
return lines
return _plot
def _initialize_prior(self, n):
self._pos_prior = np.zeros(n)
self._neg_prior = np.zeros(n)
def _update_prior(self, y):
if self.stacked and not self.subplots:
            # tsplot resampling may change data length
if len(self._pos_prior) != len(y):
self._initialize_prior(len(y))
if (y >= 0).all():
self._pos_prior += y
elif (y <= 0).all():
self._neg_prior += y
def _maybe_convert_index(self, data):
        # tsplot converts automatically, but we don't want to convert the
        # index over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
ax = self._get_ax(0)
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data.index = data.index.to_period(freq=freq)
return data
def _post_plot_logic(self):
df = self.data
condition = (not self._use_dynamic_x()
and df.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
index_name = self._get_index_name()
for ax in self.axes:
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
def _get_plot_function(self):
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
if (y >= 0).all():
start = self._pos_prior
elif (y <= 0).all():
start = self._neg_prior
else:
start = np.zeros(len(y))
                if 'color' not in kwds:
kwds['color'] = lines[0].get_color()
self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)
self._update_prior(y)
return lines
return plotf
def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
        # Because fill_between isn't supported in legends,
        # explicitly add a Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self):
LinePlot._post_plot_logic(self)
if self.ylim is None:
if (self.data >= 0).all().all():
for ax in self.axes:
ax.set_ylim(0, None)
elif (self.data <= 0).all().all():
for ax in self.axes:
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
        self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if com.is_list_like(self.left):
self.left = np.array(self.left)
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.bottom
return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
elif self.kind == 'barh':
def f(ax, x, y, w, start=None, log=self.log, **kwds):
start = start + self.left
return ax.barh(x, y, w, left=start, log=self.log, **kwds)
else:
raise ValueError("BarPlot kind must be either 'bar' or 'barh'")
return f
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
bar_f = self._get_plot_function()
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
if self.subplots:
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self):
for ax in self.axes:
if self.use_index:
str_index = [com.pprint_thing(key) for key in self.data.index]
else:
str_index = [com.pprint_thing(key) for key in
range(self.data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
if self.kind == 'bar':
ax.set_xlim((s_edge, e_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(str_index)
if name is not None and self.use_index:
ax.set_xlabel(name)
elif self.kind == 'barh':
# horizontal bars
ax.set_ylim((s_edge, e_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(str_index)
if name is not None and self.use_index:
ax.set_ylabel(name)
else:
raise NotImplementedError(self.kind)
@property
def orientation(self):
if self.kind == 'bar':
return 'vertical'
elif self.kind == 'barh':
return 'horizontal'
else:
raise NotImplementedError(self.kind)
class HistPlot(LinePlot):
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if com.is_integer(self.bins):
# create common bin edge
values = self.data.convert_objects()._get_numeric_data()
values = np.ravel(values)
values = values[~com.isnull(values)]
hist, self.bins = np.histogram(values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _get_plot_function(self):
def plotf(ax, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.bins) - 1)
y = y[~com.isnull(y)]
bottom = self._pos_prior + self.bottom
# ignore style
n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins,
bottom=bottom, **kwds)
self._update_prior(n)
return patches
return plotf
def _make_plot(self):
plotf = self._get_plot_function()
colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds['label'] = label
self._maybe_add_color(colors, kwds, style, i)
if style is not None:
kwds['style'] = style
artists = plotf(ax, y, column_num=i, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _post_plot_logic(self):
if self.orientation == 'horizontal':
for ax in self.axes:
ax.set_xlabel('Frequency')
else:
for ax in self.axes:
ax.set_ylabel('Frequency')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
sample_range = max(y) - min(y)
ind = np.linspace(min(y) - 0.5 * sample_range,
max(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
def _get_plot_function(self):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
f = MPLPlot._get_plot_function(self)
def plotf(ax, y, style=None, column_num=None, **kwds):
y = remove_na(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=self.bw_method)
else:
gkde = gaussian_kde(y)
if self.bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
ind = self._get_ind(y)
y = gkde.evaluate(ind)
lines = f(ax, ind, y, style=style, **kwds)
return lines
return plotf
def _post_plot_logic(self):
for ax in self.axes:
ax.set_ylabel('Density')
class PiePlot(MPLPlot):
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
self.kwds.setdefault('colors', self._get_colors(num_colors=len(self.data),
color_kwds='colors'))
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = com.pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [com.pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(label, value) for
label, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type=None, **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
            # Disable label ax sharing. Otherwise, all subplots show
            # the last column label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
def _get_plot_function(self):
def plotf(ax, y, column_num=None, **kwds):
if y.ndim == 2:
y = [remove_na(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na(y)
bp = ax.boxplot(y, **kwds)
if self.return_type == 'dict':
return bp, bp
elif self.return_type == 'both':
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
return plotf
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
                        raise ValueError("color dict contains invalid "
                                         "key '{0}'. The key must be one of "
                                         "{1}".format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
            # use 2 colors by default, for box/whisker and median
            # flier color isn't needed here
            # because it can be specified by the ``sym`` kwarg
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
def _make_plot(self):
plotf = self._get_plot_function()
if self.subplots:
self._return_obj = compat.OrderedDict()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=i, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [com.pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=0, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, y in self._iter_data()]
labels = [com.pprint_thing(l) for l in labels]
if not self.use_index:
labels = [com.pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self):
pass
@property
def orientation(self):
if self.kwds.get('vert', True):
return 'vertical'
else:
return 'horizontal'
@property
def result(self):
if self.return_type is None:
return super(BoxPlot, self).result
else:
return self._return_obj
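# A minimal, illustrative sketch (not used anywhere in this module) of the
# ``return_type`` values handled by BoxPlot above; assumes a small random frame.
def _boxplot_return_type_example():
    from pandas import DataFrame
    df = DataFrame(np.random.randn(10, 2), columns=['a', 'b'])
    axes = df.plot(kind='box')                        # default: matplotlib axes
    lines = df.plot(kind='box', return_type='dict')   # dict of boxes/whiskers/medians/caps/fliers
    both = df.plot(kind='box', return_type='both')    # namedtuple Boxplot(ax, lines)
    return axes, lines, both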
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot,
'kde': KdePlot, 'hist': HistPlot, 'box': BoxPlot,
'scatter': ScatterPlot, 'hexbin': HexBinPlot,
'area': AreaPlot, 'pie': PiePlot}
def _plot(data, x=None, y=None, subplots=False,
ax=None, kind='line', **kwds):
kind = _get_standard_kind(kind.lower().strip())
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
from pandas import DataFrame
if kind in _dataframe_kinds:
if isinstance(data, DataFrame):
plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
kind=kind, **kwds)
else:
raise ValueError("plot kind %r can only be used for data frames"
% kind)
elif kind in _series_kinds:
if isinstance(data, DataFrame):
if y is None and subplots is False:
msg = "{0} requires either y column or 'subplots=True'"
raise ValueError(msg.format(kind))
elif y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
if isinstance(data, DataFrame):
if x is not None:
if com.is_integer(x) and not data.columns.holds_integer():
x = data.columns[x]
data = data.set_index(x)
if y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
label = kwds['label'] if 'label' in kwds else y
series = data[y].copy() # Don't modify
series.name = label
for kw in ['xerr', 'yerr']:
if (kw in kwds) and \
(isinstance(kwds[kw], string_types) or
com.is_integer(kwds[kw])):
try:
kwds[kw] = data[kwds[kw]]
except (IndexError, KeyError, TypeError):
pass
data = series
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
    is passed in; be aware that passing in both an ax and sharex=True
    will alter all x axis labels for all axes in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_kind=df_kind,
klass_coord=df_coord, klass_ax=df_ax,
klass_unique=df_unique, klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_kind=series_kind,
klass_coord=series_coord, klass_ax=series_ax,
klass_unique=series_unique,
klass_note=series_note)
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string
Title to use for the plot
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
layout : tuple (optional)
(rows, columns) for the layout of the plot
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None, # Dataframe unique
subplots=False, sharex=None, sharey=False, layout=None, # Dataframe unique
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False, # Dataframe unique
**kwds):
return _plot(data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
secondary_y=secondary_y, sort_columns=sort_columns,
**kwds)
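# A short, illustrative sketch (not called by the module) of the shared ``plot``
# interface documented above; the frame and keyword values are made up here.
def _plot_frame_usage_example():
    from pandas import DataFrame
    df = DataFrame(np.random.randn(20, 4), columns=list('ABCD'))
    df.plot(kind='bar', stacked=True, rot=0, title='stacked bars', legend='reverse')
    df.plot(kind='line', secondary_y=['C', 'D'], mark_right=True)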
@ | Appender(_shared_docs['plot'] % _shared_doc_series_kwargs) | pandas.util.decorators.Appender |
import streamlit as st
from alphapept.gui.utils import (
check_process,
init_process,
start_process,
escape_markdown,
)
from alphapept.paths import PROCESSED_PATH, PROCESS_FILE, QUEUE_PATH, FAILED_PATH
from alphapept.settings import load_settings_as_template, save_settings
import os
import psutil
import datetime
import pandas as pd
import time
import yaml
import alphapept.interface
def queue_watcher():
"""
Start the queue_watcher.
"""
# This is in pool and should be reporting.
print(f"{datetime.datetime.now()} Started queue_watcher")
init_process(PROCESS_FILE)
while True:
queue_files = [_ for _ in os.listdir(QUEUE_PATH) if _.endswith(".yaml")]
# print(f'{datetime.datetime.now()} queue_watcher running. {len(queue_files)} experiments to process.')
if len(queue_files) > 0:
created = [
time.ctime(os.path.getctime(os.path.join(QUEUE_PATH, _)))
for _ in queue_files
]
queue_df = pd.DataFrame(queue_files, columns=["File"])
queue_df["Created"] = created
file_to_process = queue_df.sort_values("Created")["File"].iloc[0]
file_path = os.path.join(QUEUE_PATH, file_to_process)
settings = load_settings_as_template(file_path)
current_file = {}
current_file["started"] = datetime.datetime.now()
current_file["file"] = file_to_process
current_file_path = os.path.join(QUEUE_PATH, "current_file")
with open(current_file_path, "w") as file:
yaml.dump(current_file, file, sort_keys=False)
logfile = os.path.join(
PROCESSED_PATH, os.path.splitext(file_to_process)[0] + ".log"
)
try:
settings_ = alphapept.interface.run_complete_workflow(
settings, progress=True, logfile=logfile
)
save_settings(settings_, os.path.join(PROCESSED_PATH, file_to_process))
except Exception as e:
print(f"Run {file_path} failed with {e}")
settings_ = settings.copy()
settings_["error"] = f"{e}"
save_settings(settings_, os.path.join(FAILED_PATH, file_to_process))
if os.path.isfile(current_file_path):
os.remove(current_file_path)
os.remove(file_path)
else:
time.sleep(15)
def terminate_process():
with st.spinner("Terminating processes.."):
running, last_pid, p_name, status, queue_watcher_state = check_process(
PROCESS_FILE
)
parent = psutil.Process(last_pid)
procs = parent.children(recursive=True)
for p in procs:
p.terminate()
gone, alive = psutil.wait_procs(procs, timeout=3)
for p in alive:
p.kill()
parent.terminate()
parent.kill()
st.success(f"Terminated {last_pid}")
current_file = os.path.join(QUEUE_PATH, "current_file")
with open(current_file, "r") as file:
cf_ = yaml.load(file, Loader=yaml.FullLoader)
cf = cf_["file"]
file_in_process = os.path.join(QUEUE_PATH, cf)
target_file = os.path.join(FAILED_PATH, cf)
os.rename(file_in_process, target_file)
st.success(
f"Moved {escape_markdown(file_in_process)} to {escape_markdown(target_file)}"
)
if os.path.isfile(current_file):
os.remove(current_file)
st.success(f"Cleaned up {escape_markdown(current_file)}")
time.sleep(3)
raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))
def status():
st.write("# Status")
st.text(
f"This page shows the status of the current analysis.\nSwitch to `New experiment` to define a new experiment.\nSwitch to `Results` to see previous results."
)
status_msg = st.empty()
failed_msg = st.empty()
current_log = ""
log_txt = []
st.write("## Progress")
overall_txt = st.empty()
overall_txt.text("Overall: 0%")
overall = st.progress(0)
task = st.empty()
task.text("Current task: None")
current_p = st.empty()
    current_p.text("Current progress: 0%")
current = st.progress(0)
last_log = st.empty()
st.write("## Hardware utilization")
c1,c2 = st.columns(2)
c1.text("Ram")
ram = c1.progress(0)
c2.text("CPU")
cpu = c2.progress(0)
running, last_pid, p_name, status, queue_watcher_state = check_process(PROCESS_FILE)
if not running:
start_process(target=queue_watcher, process_file=PROCESS_FILE, verbose=False)
st.warning(
"Initializing Alphapept and waiting for process to start. Please refresh page in a couple of seconds."
)
if not queue_watcher_state:
with st.spinner('Waiting for AlphaPept process to start.'):
while not queue_watcher_state:
running, last_pid, p_name, status, queue_watcher_state = check_process(PROCESS_FILE)
time.sleep(1)
raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))
current_file = os.path.join(QUEUE_PATH, "current_file")
    with st.expander("Full log "):
        log_ = st.empty()
    with st.expander("Queue"):
        queue_table = st.empty()
    with st.expander("Failed"):
        failed_table = st.empty()
if st.checkbox("Terminate process"):
st.error(
f"This will abort the current run and move it to failed. Please confirm."
)
if st.button("Confirm"):
terminate_process()
while True:
ram.progress(
1 - psutil.virtual_memory().available / psutil.virtual_memory().total
)
cpu.progress(psutil.cpu_percent() / 100)
queue_files = [_ for _ in os.listdir(QUEUE_PATH) if _.endswith(".yaml")]
failed_files = [_ for _ in os.listdir(FAILED_PATH) if _.endswith(".yaml")]
n_failed = len(failed_files)
n_queue = len(queue_files)
if n_queue == 0:
status_msg.success(
f'{datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S")} No files to process. Please add new experiments.'
)
current.progress(0)
overall.progress(0)
overall_txt.text("Overall: 0%")
task.text("None")
last_log.code("")
queue_table.table(pd.DataFrame())
else:
if os.path.isfile(current_file):
with open(current_file, "r") as file:
cf_ = yaml.load(file, Loader=yaml.FullLoader)
cf = cf_["file"]
cf_start = cf_["started"]
now = datetime.datetime.now()
delta = f"{now-cf_start}".split('.')[0]
status_msg.success(
f'{now.strftime("%d.%m.%Y %H:%M:%S")} Processing {escape_markdown(cf)}. Time elapsed {delta}'
)
logfile = os.path.join(PROCESSED_PATH, os.path.splitext(cf)[0] + ".log")
if current_log != logfile:
current_log = logfile
log_txt = []
                with open(logfile, "r") as f:
                    lines = f.readlines()[-200:]  # Limit to 200 lines
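                # The log is parsed line by line below; the workflow writes plain-text
                # markers such as "__progress_current 0.42", "__progress_overall 0.10"
                # or "__current_task <task name>" (values here are illustrative), and
                # every other line is shown verbatim in the log widgets.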
for line in lines:
if "__progress_current" in line:
current_p_ = float(line.split("__progress_current ")[1][:5])
current.progress(current_p_)
current_p.text(f"Current progress: {current_p_*100:.2f}%")
elif "__progress_overall" in line:
overall_p = float(line.split("__progress_overall ")[1][:5])
overall.progress(overall_p)
overall_txt.text(f"Overall: {overall_p*100:.2f}%")
elif "__current_task" in line:
task_ = line.strip("\n").split("__current_task ")[1]
task.text(f"Current task: {task_}")
else:
log_txt.append(line)
last_log.code("".join(log_txt[-3:]))
log_.code("".join(log_txt))
created = [
time.ctime(os.path.getctime(os.path.join(QUEUE_PATH, _)))
for _ in queue_files
]
queue_df = | pd.DataFrame(queue_files, columns=["File"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from collections import Counter
import os
import sys
data_path = os.path.join("data", "iris.data")
column_names = ["sepal_length", "sepal_width",
"petal_length", "petal_width", "class"]
def is_float_list(iterable):
"""Checks if all elements of an iterable are floats
"""
for element in iterable:
if type(element) is not float:
return False
return True
def ask_for_flower_data():
"""Asks for user input and processes it to the desired format
Returns:
flower_data: A list of 4 float values
"""
flower_data = tuple(input(
f"\nEnter the 4 values, separated by spaces and in order({column_names[:-1]}), you want to use to classify a flower\n").split())
try:
flower_data = tuple(float(item) for item in flower_data)
    except ValueError:
print("Error during conversion. Please check input values")
return ()
return flower_data
def ask_for_neighbours():
"""Asks for user input for the number of neighbours
"""
number_of_neighbours = input(
"\nPlease indicate, with an integer, the number of neighbours desired\n")
try:
number_of_neighbours = int(number_of_neighbours)
    except ValueError:
print("Error during conversion. Please check input value")
return 0
return number_of_neighbours
def get_data():
flower_data = ask_for_flower_data()
while len(flower_data) != 4 or not is_float_list(flower_data):
print("*WARNING: Invalid data. Try again*\n")
flower_data = ask_for_flower_data()
number_of_neighbours = ask_for_neighbours()
while number_of_neighbours <= 0 or type(number_of_neighbours) is not int:
number_of_neighbours = ask_for_neighbours()
return flower_data, number_of_neighbours
def distance_between_vectors(vector1, vector2):
# return np.sqrt(np.sum([x**2 for x in (vector1 - vector2)]))
return np.sqrt(np.sum([x**2 for x in np.subtract(vector1, vector2)]))
def knn_classify(data_iterable, number_of_neighbours):
"""Given an iterable of 4 features and n, return the class
obtained through KNN(n)
Args:
data_iterable: iterable of 4 features (not class) to use as input data
number_of_neighbours: the number of nearest neighbours to check
"""
iris_df = | pd.read_csv(data_path, names=column_names) | pandas.read_csv |
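# A hypothetical sketch (not the original continuation of ``knn_classify``) of how
# the nearest-neighbour vote could be finished with the helpers defined above
# (``distance_between_vectors``, ``column_names`` and ``Counter``):
def knn_vote_sketch(iris_df, flower_data, number_of_neighbours):
    """Return the majority class among the n nearest training rows."""
    distances = iris_df[column_names[:-1]].apply(
        lambda row: distance_between_vectors(row.values, flower_data), axis=1)
    nearest_labels = iris_df.loc[distances.nsmallest(number_of_neighbours).index, "class"]
    return Counter(nearest_labels).most_common(1)[0][0]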
from abc import ABC, abstractmethod
from collections import defaultdict
from datetime import datetime
from functools import cached_property
from typing import List, Dict, Union, Optional, Iterable
import numpy as np
import pandas as pd
from gym import Space, spaces
from pandas import Interval
from torch.utils.data import Dataset
from yacht import Mode, utils
from yacht.data.markets import Market
from yacht.data.scalers import Scaler
from yacht.data.transforms import Compose
from yacht.logger import Logger
class DatasetPeriod:
def __init__(
self,
start: datetime,
end: datetime,
window_size: int,
include_weekends: bool,
take_action_at: str = 'current',
frequency: str = 'd'
):
assert frequency in ('d', )
self.unadjusted_start = start
self.unadjusted_end = end
self.period_adjustment_size = self.compute_period_adjustment_size(
window_size=window_size,
take_action_at=take_action_at
)
# Adjust start with a 'window_size' length so we take data from the past & actually start from the given start.
self.start = utils.adjust_period_with_window(
datetime_point=start,
window_size=self.period_adjustment_size, # We also use the initial price within the period.
action='-',
include_weekends=include_weekends,
frequency=frequency
)
self.end = end
self.window_size = window_size
self.include_weekends = include_weekends
self.take_action_at = take_action_at
self.frequency = frequency
assert self.start <= self.unadjusted_start
@classmethod
def compute_period_adjustment_size(cls, window_size: int, take_action_at: str) -> int:
assert take_action_at in ('current', 'next')
if take_action_at == 'current':
return window_size - 1
elif take_action_at == 'next':
return window_size
def __len__(self) -> int:
return utils.len_period_range(
start=self.start,
end=self.end,
include_weekends=self.include_weekends
)
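# A small sketch (not used by the module) of the adjustment rule implemented above:
# with a window of 5 bars, acting on the current bar needs 4 extra past bars,
# while acting on the next bar needs 5.
def _period_adjustment_example() -> None:
    assert DatasetPeriod.compute_period_adjustment_size(window_size=5, take_action_at='current') == 4
    assert DatasetPeriod.compute_period_adjustment_size(window_size=5, take_action_at='next') == 5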
class AssetDataset(Dataset, ABC):
PRICE_FEATURES = (
'Close',
'Open',
'High',
'Low'
)
def __init__(
self,
market: Market,
storage_dir: str,
intervals: List[str],
features: List[str],
decision_price_feature: str,
period: DatasetPeriod,
render_intervals: List[Interval],
mode: Mode,
logger: Logger,
window_size: int = 1,
):
"""
market:
storage_dir:
ticker:
intervals: data bars frequency
features: observation data features
        decision_price_feature: the feature that will be used for buying / selling assets or other decision making
start:
end:
render_intervals: a list of datetime intervals to know if this environment should be rendered or not.
normalizer:
        window_size: the amount of past information to add to the current item queried from the dataset.
        data: if data is not None, it will be encapsulated within the Dataset object; otherwise it will be queried
from the market.
"""
assert '1d' == intervals[0], 'One day bar interval is mandatory to exist & index=0 in input.intervals config.'
assert window_size >= 1
self.market = market
self.storage_dir = storage_dir
self.intervals = intervals
self.features = features
self.decision_price_feature = decision_price_feature
self.render_intervals = render_intervals
self.period = period
self.mode = mode
self.logger = logger
self.window_size = window_size
def close(self):
self.market.close()
@property
def period_window_size(self) -> int:
return self.period.window_size
@property
def period_adjustment_size(self) -> int:
return self.period.period_adjustment_size
@property
def take_action_at(self) -> str:
return self.period.take_action_at
@property
def first_observation_index(self) -> int:
# Starting from 0 & the minimum value for the window_size is 1.
return self.period_window_size - 1
@property
def last_observation_index(self) -> int:
return self.period_adjustment_size + self.num_days - 1
@property
def unadjusted_start(self) -> datetime:
return self.period.unadjusted_start
@property
def unadjusted_end(self) -> datetime:
return self.period.unadjusted_end
@property
def start(self) -> datetime:
return self.period.start
@property
def end(self) -> datetime:
return self.period.end
@property
def include_weekends(self) -> bool:
return self.market.include_weekends
@cached_property
def should_render(self) -> bool:
        # Because it is not efficient to render all the environments, we use some desired logic to choose what to render.
for render_interval in self.render_intervals:
if self.start in render_interval or self.end in render_interval:
return True
return False
@property
@abstractmethod
def num_days(self) -> int:
pass
@property
@abstractmethod
def num_assets(self) -> int:
pass
@property
@abstractmethod
def asset_tickers(self) -> List[str]:
pass
@abstractmethod
def index_to_datetime(self, integer_index: int) -> datetime:
pass
@abstractmethod
def inverse_scaling(self, observation: dict, **kwargs) -> dict:
pass
@abstractmethod
def __len__(self):
pass
@abstractmethod
def __getitem__(self, current_index: int) -> Dict[str, np.array]:
"""
Args:
current_index: The relative index the data will be given from.
Returns:
The data features within the [current_index - window_size + 1, current_index] interval.
"""
pass
@abstractmethod
def __str__(self):
pass
@abstractmethod
def get_prices(self) -> pd.DataFrame:
pass
@abstractmethod
def get_decision_prices(self, t_tick: Optional[int] = None, **kwargs) -> pd.Series:
pass
@abstractmethod
def compute_mean_price(self, start: datetime, end: datetime) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def get_external_observation_space(self) -> Dict[str, Space]:
"""
Returns the gym spaces observation space in the format that the dataset gives the data.
"""
pass
class SingleAssetDataset(AssetDataset, ABC):
def __init__(
self,
ticker: str,
market: Market,
storage_dir: str,
intervals: List[str],
features: List[str],
decision_price_feature: str,
period: DatasetPeriod,
render_intervals: List[Interval],
render_tickers: List[str],
mode: Mode,
logger: Logger,
scaler: Scaler,
window_transforms: Optional[Compose] = None,
window_size: int = 1,
data: Dict[str, pd.DataFrame] = None
):
super().__init__(
market=market,
storage_dir=storage_dir,
intervals=intervals,
features=features,
decision_price_feature=decision_price_feature,
period=period,
render_intervals=render_intervals,
mode=mode,
logger=logger,
window_size=window_size,
)
self.ticker = ticker
self.scaler = scaler
self.window_transforms = window_transforms
self.render_tickers = render_tickers
if data is not None:
self.data = data
else:
self.data = dict()
for interval in self.intervals:
self.data[interval] = self.market.get(
ticker=ticker,
interval=interval,
start=self.start,
end=self.end,
features=self.features + [self.decision_price_feature],
squeeze=False
)
self.prices = self.get_prices()
def __str__(self) -> str:
return self.ticker
def __len__(self) -> int:
# All the adjusted interval.
return len(self.prices)
@property
def num_days(self) -> int:
# Only the unadjusted interval.
return utils.len_period_range(
start=self.unadjusted_start,
end=self.unadjusted_end,
include_weekends=self.include_weekends
)
@property
def num_assets(self) -> int:
return 1
@property
def asset_tickers(self) -> List[str]:
return [self.ticker]
@cached_property
def should_render(self) -> bool:
if self.ticker in self.render_tickers:
return super().should_render
return False
def index_to_datetime(self, integer_index: Union[int, Iterable]) -> Union[datetime, Iterable[datetime]]:
return self.data['1d'].index[integer_index].to_pydatetime()
def get_prices(self) -> pd.DataFrame:
return self.market.get(
ticker=self.ticker,
interval='1d',
start=self.start,
end=self.end,
features=list(self.market.DOWNLOAD_MANDATORY_FEATURES) + [self.decision_price_feature],
squeeze=False
)
def get_decision_prices(self, t_tick: Optional[int] = None, **kwargs) -> pd.Series:
if t_tick is None:
decision_prices = self.prices.loc[slice(None), self.decision_price_feature]
decision_prices.name = 'decision_price'
else:
t_datetime = self.index_to_datetime(t_tick)
decision_prices = self.prices.loc[t_datetime, self.decision_price_feature]
decision_prices = pd.Series(decision_prices, index=[self.ticker], name='decision_price')
return decision_prices
def compute_mean_price(self, start: datetime, end: datetime) -> Union[pd.DataFrame, pd.Series]:
period_data = self.data['1d'].loc[start:end, self.decision_price_feature]
period_mean = period_data.mean()
return pd.Series(period_mean, index=[self.ticker], name='mean_price')
def inverse_scaling(self, observation: dict, asset_idx: int = -1) -> dict:
for interval in self.intervals:
if asset_idx == -1:
observation[interval] = self.scaler.inverse_transform(observation[interval])
else:
observation[interval][:, :, asset_idx, :] = self.scaler.inverse_transform(
observation[interval][:, :, asset_idx, :]
)
return observation
class MultiAssetDataset(AssetDataset):
# TODO: Implement the multi-asset dependency within a DataFrame for faster processing.
def __init__(
self,
datasets: List[SingleAssetDataset],
storage_dir: str,
market: Market,
intervals: List[str],
features: List[str],
decision_price_feature: str,
period: DatasetPeriod,
render_intervals: List[Interval],
render_tickers: List[str],
mode: Mode,
logger: Logger,
window_size: int = 1,
attached_datasets: Optional[List[SingleAssetDataset]] = None
):
super().__init__(
market=market,
storage_dir=storage_dir,
intervals=intervals,
features=features,
decision_price_feature=decision_price_feature,
period=period,
render_intervals=render_intervals,
mode=mode,
logger=logger,
window_size=window_size,
)
self.datasets = datasets
self.render_tickers = render_tickers
self.attached_datasets = attached_datasets if attached_datasets is not None else []
assert self.datasets[0].num_days * len(self.datasets) == sum([dataset.num_days for dataset in self.datasets]), \
'All the datasets should have the same length.'
@property
def num_days(self) -> int:
        # All the datasets have the same number of days, because they reflect the same time period (e.g. the same month).
return self.datasets[0].num_days
@property
def num_assets(self) -> int:
return len(self.datasets)
@property
def asset_tickers(self) -> List[str]:
return [dataset.ticker for dataset in self.datasets]
@cached_property
def should_render(self) -> bool:
return any([dataset.should_render for dataset in self.datasets])
def index_to_datetime(self, integer_index: Union[int, Iterable]) -> Union[datetime, Iterable[datetime]]:
# All the datasets have the same indices to dates mappings.
return self.datasets[0].index_to_datetime(integer_index)
def __len__(self):
# All the datasets have the same length.
return len(self.datasets[0])
def __getitem__(self, current_index: int) -> Dict[str, np.array]:
datasets = self.datasets + self.attached_datasets
stacked_items: Dict[str, list] = defaultdict(list)
for dataset in datasets:
item = dataset[current_index]
for key, value in item.items():
stacked_items[key].append(value)
for key, value in stacked_items.items():
stacked_items[key] = np.stack(stacked_items[key], axis=2)
return stacked_items
def inverse_scaling(self, observation: dict, **kwargs) -> dict:
for asset_idx in range(self.num_assets):
dataset = self.datasets[asset_idx]
observation = dataset.inverse_scaling(observation, asset_idx)
return observation
def __str__(self):
asset_tickers = [ticker.split('-')[0] for ticker in self.asset_tickers]
return '-'.join(asset_tickers)
def get_prices(self) -> pd.DataFrame:
prices = []
for dataset in self.datasets:
dataset_prices = dataset.get_prices()
dataset_prices = dataset_prices.assign(ticker=dataset.ticker)
dataset_prices = dataset_prices.set_index(keys=['ticker'], drop=True, append=True)
prices.append(dataset_prices)
prices = pd.concat(prices)
return prices
def get_labels(self, t_tick: Optional[int] = None) -> Union[pd.DataFrame, pd.Series]:
labels = []
for dataset in self.datasets:
ticker_labels = getattr(dataset, 'labels', pd.Series())
ticker_labels.name = dataset.ticker
labels.append(ticker_labels)
labels = pd.concat(labels, axis=1)
if len(labels) < t_tick:
return | pd.Series() | pandas.Series |
from collections import deque
from datetime import datetime
import operator
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError):
x >= y
with pytest.raises(TypeError):
x > y
with pytest.raises(TypeError):
x < y
with pytest.raises(TypeError):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.slow
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(10 ** 6).reshape(100, -1)
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
if opname in ["__rmod__", "__rfloordiv__"]:
# exvals will have dtypes [f8, i8, i8] so expected will be
# all-f8, but the DataFrame operation will return mixed dtypes
# use exvals[-1].dtype instead of "i8" for compat with 32-bit
# systems/pythons
expected[False] = expected[False].astype(exvals[-1].dtype)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = | pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import copy
import seaborn as sn
from sklearn.naive_bayes import GaussianNB, MultinomialNB, CategoricalNB
from DataLoad import dataload
from Classifier.Bayes.NaiveBayes import NaiveBayes
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from sklearn.model_selection import cross_val_score, cross_val_predict, KFold
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
# Define the model
model = DecisionTreeClassifier()
# load data
train = dataload('./train.csv')
train_data = train.get_data()
train_ordinal = train.get_ordinal_data()
train_nominal = train.get_nominal_data()
missing_ordinal = train.get_ordinal_mean()
train_label = train.get_label()
test = dataload('./test.csv', missing_ordinal)
test_data = test.get_data()
test_ordinal = test.get_ordinal_data()
test_nominal = test.get_nominal_data()
test_label = test.get_label()
# normalization
train_ordinal = (train_ordinal - train_ordinal.min())/(train_ordinal.max() - train_ordinal.min())
test_ordinal = (test_ordinal - test_ordinal.min())/(test_ordinal.max() - test_ordinal.min())
#train_ordinal = (train_ordinal - train_ordinal.min())/(train_ordinal.std())
#test_ordinal = (test_ordinal - test_ordinal.min())/(test_ordinal.std())
#train_ordinal = normalize(train_ordinal, norm = 'l1', axis = 0)
#test_ordinal = normalize(test_ordinal, norm = 'l1', axis = 0)
#train_ordinal = normalize(train_ordinal, norm = 'l2', axis = 0)
#test_ordinal = normalize(test_ordinal, norm = 'l2', axis = 0)
# feature reduction
nc = 10
pca1 = PCA(n_components=nc, svd_solver='full')
train_ordinal = pca1.fit_transform(train_ordinal)
pca2 = PCA(n_components=nc, svd_solver='full')
test_ordinal = pca2.fit_transform(test_ordinal)
# transform to pandas dataframe
train_ordinal = | pd.DataFrame(train_ordinal) | pandas.DataFrame |
"""
Lineplot from a wide-form dataset
=================================
_thumb: .52, .5
"""
import numpy as np
import pandas as pd
import seaborn as sns
sns.set(style="whitegrid")
rs = np.random.RandomState(365)
values = rs.randn(365, 4).cumsum(axis=0)
dates = pd.date_range("1 1 2016", periods=365, freq="D")
data = | pd.DataFrame(values, dates, columns=["A", "B", "C", "D"]) | pandas.DataFrame |
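# (With a wide-form frame like ``data`` above, the plot itself would typically be
# drawn with something like ``sns.lineplot(data=data, palette="tab10", linewidth=2.5)``;
# this line is illustrative and not part of the excerpt.)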
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)
from tqdm import tqdm
import multiprocessing
from linevul_model import Model
import pandas as pd
# metrics
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
from sklearn.metrics import auc
# model reasoning
from captum.attr import LayerIntegratedGradients, DeepLift, DeepLiftShap, GradientShap, Saliency
# word-level tokenizer
from tokenizers import Tokenizer
cpu_cont = 16
logger = logging.getLogger(__name__)
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
input_tokens,
input_ids,
label):
self.input_tokens = input_tokens
self.input_ids = input_ids
self.label=label
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_type="train"):
if file_type == "train":
file_path = args.train_data_file
elif file_type == "eval":
file_path = args.eval_data_file
elif file_type == "test":
file_path = args.test_data_file
self.examples = []
df = pd.read_csv(file_path)
funcs = df["processed_func"].tolist()
labels = df["target"].tolist()
for i in tqdm(range(len(funcs))):
self.examples.append(convert_examples_to_features(funcs[i], labels[i], tokenizer, args))
if file_type == "train":
for example in self.examples[:3]:
logger.info("*** Example ***")
logger.info("label: {}".format(example.label))
logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return torch.tensor(self.examples[i].input_ids),torch.tensor(self.examples[i].label)
def convert_examples_to_features(func, label, tokenizer, args):
if args.use_word_level_tokenizer:
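        # Word-level tokenizer path: truncate to 510 ids so the two special ids added
        # below fit into a fixed length of 512, prepend id 0 and append id 2 as the
        # start/end markers, then pad with id 1 (RoBERTa-style id conventions are
        # assumed here).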
encoded = tokenizer.encode(func)
encoded = encoded.ids
if len(encoded) > 510:
encoded = encoded[:510]
encoded.insert(0, 0)
encoded.append(2)
if len(encoded) < 512:
padding = 512 - len(encoded)
for _ in range(padding):
encoded.append(1)
source_ids = encoded
source_tokens = []
return InputFeatures(source_tokens, source_ids, label)
#source
code_tokens = tokenizer.tokenize(str(func))[:args.block_size-2]
source_tokens = [tokenizer.cls_token] + code_tokens + [tokenizer.sep_token]
source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
padding_length = args.block_size - len(source_ids)
source_ids += [tokenizer.pad_token_id] * padding_length
return InputFeatures(source_tokens, source_ids, label)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, eval_dataset):
""" Train the model """
# build dataloader
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=0)
args.max_steps = args.epochs * len(train_dataloader)
# evaluate the model per epoch
args.save_steps = len(train_dataloader)
args.warmup_steps = args.max_steps // 5
model.to(args.device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps)
# multi-gpu training
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size//max(args.n_gpu, 1))
logger.info(" Total train batch size = %d",args.train_batch_size*args.gradient_accumulation_steps)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", args.max_steps)
global_step=0
tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0
best_f1=0
model.zero_grad()
for idx in range(args.epochs):
bar = tqdm(train_dataloader,total=len(train_dataloader))
tr_num = 0
train_loss = 0
for step, batch in enumerate(bar):
(inputs_ids, labels) = [x.to(args.device) for x in batch]
model.train()
loss, logits = model(input_ids=inputs_ids, labels=labels)
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
tr_num += 1
train_loss += loss.item()
            # running average loss over the current epoch (shown in the progress bar)
            avg_loss = round(train_loss / tr_num, 5)
bar.set_description("epoch {} loss {}".format(idx,avg_loss))
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
                # perplexity-style running loss since the last logging step
                avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4)
if global_step % args.save_steps == 0:
results = evaluate(args, model, tokenizer, eval_dataset, eval_when_training=True)
# Save model checkpoint
if results['eval_f1']>best_f1:
best_f1=results['eval_f1']
logger.info(" "+"*"*20)
logger.info(" Best f1:%s",round(best_f1,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-f1'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,'module') else model
output_dir = os.path.join(output_dir, '{}'.format(args.model_name))
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
def evaluate(args, model, tokenizer, eval_dataset, eval_when_training=False):
#build dataloader
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,batch_size=args.eval_batch_size,num_workers=0)
# multi-gpu evaluate
if args.n_gpu > 1 and eval_when_training is False:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
logits=[]
y_trues=[]
for batch in eval_dataloader:
(inputs_ids, labels)=[x.to(args.device) for x in batch]
with torch.no_grad():
lm_loss, logit = model(input_ids=inputs_ids, labels=labels)
eval_loss += lm_loss.mean().item()
logits.append(logit.cpu().numpy())
y_trues.append(labels.cpu().numpy())
nb_eval_steps += 1
#calculate scores
logits = np.concatenate(logits,0)
y_trues = np.concatenate(y_trues,0)
    # classify using a fixed decision threshold on the positive-class score
    best_threshold = 0.5
    y_preds = logits[:, 1] > best_threshold
recall = recall_score(y_trues, y_preds)
precision = precision_score(y_trues, y_preds)
f1 = f1_score(y_trues, y_preds)
result = {
"eval_recall": float(recall),
"eval_precision": float(precision),
"eval_f1": float(f1),
"eval_threshold":best_threshold,
}
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key],4)))
return result
def test(args, model, tokenizer, test_dataset, best_threshold=0.5):
# build dataloader
test_sampler = SequentialSampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.eval_batch_size, num_workers=0)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running Test *****")
logger.info(" Num examples = %d", len(test_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
logits=[]
y_trues=[]
for batch in test_dataloader:
(inputs_ids, labels) = [x.to(args.device) for x in batch]
with torch.no_grad():
lm_loss, logit = model(input_ids=inputs_ids, labels=labels)
eval_loss += lm_loss.mean().item()
logits.append(logit.cpu().numpy())
y_trues.append(labels.cpu().numpy())
nb_eval_steps += 1
# calculate scores
logits = np.concatenate(logits, 0)
y_trues = np.concatenate(y_trues, 0)
y_preds = logits[:, 1] > best_threshold
acc = accuracy_score(y_trues, y_preds)
recall = recall_score(y_trues, y_preds)
precision = precision_score(y_trues, y_preds)
f1 = f1_score(y_trues, y_preds)
result = {
"test_accuracy": float(acc),
"test_recall": float(recall),
"test_precision": float(precision),
"test_f1": float(f1),
"test_threshold":best_threshold,
}
logger.info("***** Test results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key],4)))
logits = [l[1] for l in logits]
result_df = generate_result_df(logits, y_trues, y_preds, args)
sum_lines, sum_flaw_lines = get_line_statistics(result_df)
# write raw predictions if needed
if args.write_raw_preds:
write_raw_preds_csv(args, y_preds)
# define reasoning method
if args.reasoning_method == "all":
all_reasoning_method = ["attention", "lig", "saliency", "deeplift", "deeplift_shap", "gradient_shap"]
else:
all_reasoning_method = [args.reasoning_method]
if args.do_sorting_by_line_scores:
# (RQ2) Effort@TopK%Recall & Recall@TopK%LOC for the whole test set
# flatten the logits
for reasoning_method in all_reasoning_method:
dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=1, num_workers=0)
progress_bar = tqdm(dataloader, total=len(dataloader))
all_pos_score_label = []
all_neg_score_label = []
index = 0
total_pred_as_vul = 0
for mini_batch in progress_bar:
# if predicted as vulnerable
if result_df["logits"][index] > 0.5:
total_pred_as_vul += 1
all_lines_score_with_label = \
line_level_localization(flaw_lines=result_df["flaw_line"][index],
tokenizer=tokenizer,
model=model,
mini_batch=mini_batch,
original_func=result_df["processed_func"][index],
args=args,
top_k_loc=None,
top_k_constant=None,
reasoning_method=reasoning_method,
index=index)
all_pos_score_label.append(all_lines_score_with_label)
# else predicted as non vulnerable
else:
all_lines_score_with_label = \
line_level_localization(flaw_lines=result_df["flaw_line"][index],
tokenizer=tokenizer,
model=model,
mini_batch=mini_batch,
original_func=result_df["processed_func"][index],
args=args,
top_k_loc=None,
top_k_constant=None,
reasoning_method=reasoning_method,
index=index)
all_neg_score_label.append(all_lines_score_with_label)
index += 1
            is_attention = (reasoning_method == "attention")
total_pos_lines, pos_rank_df = rank_lines(all_pos_score_label, is_attention, ascending_ranking=False)
if is_attention:
total_neg_lines, neg_rank_df = rank_lines(all_neg_score_label, is_attention, ascending_ranking=True)
else:
total_neg_lines, neg_rank_df = rank_lines(all_neg_score_label, is_attention, ascending_ranking=False)
effort, inspected_line = top_k_effort(pos_rank_df, sum_lines, sum_flaw_lines, args.effort_at_top_k)
recall_value = top_k_recall(pos_rank_df, neg_rank_df, sum_lines, sum_flaw_lines, args.top_k_recall_by_lines)
logger.info(f"total functions predicted as vulnerable: {total_pred_as_vul}")
to_write = ""
to_write += "\n" + f"Reasoning Method: {reasoning_method}" + "\n"
to_write += f"total predicted vulnerable lines: {total_pos_lines}" + "\n"
logger.info(f"total predicted vulnerable lines: {total_pos_lines}")
to_write += f"total lines: {sum_lines}" + "\n"
logger.info(f"total lines: {sum_lines}")
to_write += f"total flaw lines: {sum_flaw_lines}" + "\n"
logger.info(f"total flaw lines: {sum_flaw_lines}")
vul_as_vul = sum(pos_rank_df["label"].tolist())
to_write += f"total flaw lines in predicted as vulnerable: {vul_as_vul}" + "\n"
logger.info(f"total flaw lines in predicted as vulnerable: {vul_as_vul}")
to_write += f"top{args.effort_at_top_k}-Effort: {effort}" + "\n"
logger.info(f"top{args.effort_at_top_k}-Effort: {effort}")
to_write += f"total inspected line to find out {args.effort_at_top_k} of flaw lines: {inspected_line}" + "\n"
logger.info(f"total inspected line to find out {args.effort_at_top_k} of flaw lines: {inspected_line}")
to_write += f"top{args.top_k_recall_by_lines}-Recall: {recall_value}" + "\n"
logger.info(f"top{args.top_k_recall_by_lines}-Recall: {recall_value}")
with open("./results/rq2_result.txt", "a") as f:
f.write(to_write)
if args.do_sorting_by_pred_prob:
rank_df = rank_dataframe(df=result_df, rank_by="logits", ascending=False)
effort, inspected_line = top_k_effort_pred_prob(rank_df, sum_lines, sum_flaw_lines, args.effort_at_top_k, label_col_name="y_preds")
top_k_recall_val = top_k_recall_pred_prob(rank_df, sum_lines, sum_flaw_lines, args.top_k_recall_by_pred_prob, label_col_name="y_preds")
with open("./results/rq2_result_pred_prob.txt", "a") as f:
f.write(f"\n Sorted By Prediction Probabilities \n top{args.effort_at_top_k}-Effort: {effort} \n top{args.top_k_recall_by_pred_prob}-Recall: {top_k_recall_val}")
logger.info(f"\n Sorted By Prediction Probabilities \n top{args.effort_at_top_k}-Effort: {effort} \n top{args.top_k_recall_by_pred_prob}-Recall: {top_k_recall_val}")
# (RQ3) Line level evaluation for True Positive cases
if args.do_local_explanation:
for reasoning_method in all_reasoning_method:
logger.info(f"***** Running Explanation - {reasoning_method} *****")
correct_indices = np.where((y_trues == y_preds))
correct_indices = list(correct_indices[0])
print("correct prediction count: ", len(correct_indices))
tp_indices = np.where((y_trues == y_preds) & (y_trues == 1))
tp_indices = list(tp_indices[0])
print("correct vulnerable count: ", len(tp_indices))
# localization part
dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=1, num_workers=0)
# prepare data for line-level reasoning
df = pd.read_csv(args.test_data_file)
# stats for line-level evaluation
top_k_locs = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
top_k_constant = [args.top_k_constant]
sum_total_lines = 0
total_flaw_lines = 0
total_function = 0
all_top_10_correct_idx = []
all_top_10_not_correct_idx = []
# for CodeBERT reasoning
total_correctly_predicted_flaw_lines = [0 for _ in range(len(top_k_locs))]
total_correctly_localized_function = [0 for _ in range(len(top_k_constant))]
total_min_clean_lines_inspected = 0
ifa_records = []
top_10_acc_records = []
total_max_clean_lines_inspected = 0
            # vulnerability exists but explanation is not applicable (flaw tokens fall outside the sequence length)
na_explanation_total = 0
na_eval_results_512 = 0
na_defective_data_point = 0
# track progress
progress_bar = tqdm(dataloader, total=len(dataloader))
# used to locate the row in test data
index = 0
for mini_batch in progress_bar:
# if true positive (vulnerable predicted as vulnerable), do explanation
if index in tp_indices:
                    # if a flaw line exists; when it does not, the column value is a float (NaN)
if isinstance(df["flaw_line"][index], str) and isinstance(df["flaw_line_index"][index], str):
line_eval_results = \
line_level_localization_tp(flaw_lines=df["flaw_line"][index],
tokenizer=tokenizer,
model=model,
mini_batch=mini_batch,
original_func=df["processed_func"][index],
args=args,
top_k_loc=top_k_locs,
top_k_constant=top_k_constant,
reasoning_method=reasoning_method,
index=index,
write_invalid_data=False)
if line_eval_results == "NA":
na_explanation_total += 1
na_eval_results_512 += 1
else:
total_function += 1
sum_total_lines += line_eval_results["total_lines"]
total_flaw_lines += line_eval_results["num_of_flaw_lines"]
# IFA metric
total_min_clean_lines_inspected += line_eval_results["min_clean_lines_inspected"]
# For IFA Boxplot
ifa_records.append(line_eval_results["min_clean_lines_inspected"])
# For Top-10 Acc Boxplot
# todo
#top_10_acc_records.append(line_eval_results[])
# All effort metric
total_max_clean_lines_inspected += line_eval_results["max_clean_lines_inspected"]
for j in range(len(top_k_locs)):
total_correctly_predicted_flaw_lines[j] += line_eval_results["all_correctly_predicted_flaw_lines"][j]
# top 10 accuracy
for k in range(len(top_k_constant)):
total_correctly_localized_function[k] += line_eval_results["all_correctly_localized_function"][k]
# top 10 correct idx and not correct idx
if line_eval_results["top_10_correct_idx"] != []:
all_top_10_correct_idx.append(line_eval_results["top_10_correct_idx"][0])
if line_eval_results["top_10_not_correct_idx"] != []:
all_top_10_not_correct_idx.append(line_eval_results["top_10_not_correct_idx"][0])
else:
na_explanation_total += 1
na_defective_data_point += 1
index += 1
# write IFA records for IFA Boxplot
with open(f"./ifa_records/ifa_{reasoning_method}.txt", "w+") as f:
f.write(str(ifa_records))
# write Top-10 Acc records for Top-10 Accuracy Boxplot
# todo
#with open(f"./top_10_acc_records/top_10_acc_{reasoning_method}.txt", "w+") as f:
# f.write(str())
logger.info(f"Total number of functions: {total_function}")
logger.info(f"Total number of lines: {sum_total_lines}")
logger.info(f"Total number of flaw lines: {total_flaw_lines}")
logger.info(f"Total Explanation Not Applicable: {na_explanation_total}")
logger.info(f"NA Eval Results (Out of 512 Tokens): {na_eval_results_512}")
logger.info(f"NA Defective Data Point: {na_defective_data_point}")
line_level_results = [{f"codebert_{reasoning_method}_top20%_recall":
[round(total_correctly_predicted_flaw_lines[i] / total_flaw_lines, 2) * 100 for i in range(len(top_k_locs))],
f"codebert_{reasoning_method}_top10_accuracy":
[round(total_correctly_localized_function[i] / total_function, 2) * 100 for i in range(len(top_k_constant))],
f"codebert_{reasoning_method}_ifa":
round(total_min_clean_lines_inspected / total_function, 2),
f"codebert_{reasoning_method}_recall@topk%loc_auc":
auc(x=top_k_locs, y=[round(total_correctly_predicted_flaw_lines[i] / total_flaw_lines, 2) for i in range(len(top_k_locs))]),
f"codebert_{reasoning_method}_total_effort":
round(total_max_clean_lines_inspected / sum_total_lines, 2),
"avg_line_in_one_func":
int(sum_total_lines / total_function),
"total_func":
total_function,
"all_top_10_correct_idx": all_top_10_correct_idx,
"all_top_10_not_correct_idx": all_top_10_not_correct_idx}]
with open('./results/line_level_correct_idx.pkl', 'wb') as f:
pickle.dump(all_top_10_correct_idx, f)
with open('./results/line_level_not_correct_idx.pkl', 'wb') as f:
pickle.dump(all_top_10_not_correct_idx, f)
logger.info("***** Line Level Result *****")
logger.info(line_level_results)
# output results
# with open("./results/local_explanation.pkl", "wb") as f:
# pickle.dump(line_level_results, f)
def generate_result_df(logits, y_trues, y_preds, args):
df = | pd.read_csv(args.test_data_file) | pandas.read_csv |
"""
Utility functions for ARNA campaign/project work
"""
import os
import sys
import glob
import gc
import numpy as np
import pandas as pd
import xarray as xr
import xesmf as xe
import AC_tools as AC
from netCDF4 import Dataset
from datetime import datetime as datetime_
import datetime as datetime
import time
from time import gmtime, strftime
import matplotlib.pyplot as plt
import matplotlib
def mk_core_plot_folders_then_mv2webfiles(dt=None, mv2webfiles=True,
debug=True):
"""
Make core folders (+?? hours), then move these to webfiles
"""
# Use yesterday's forecast at noon if others not available.
if isinstance(dt, type(None)):
Tnow = AC.time2datetime([gmtime()])[0]
# Get the 5-day forecast at noon...
dt = datetime.datetime(Tnow.year, Tnow.month, Tnow.day, 12, )
# Use yesterday
dt = AC.add_days(dt, -1)
# - mv the +24/+48 files into the core folders
dstr = dt.strftime('%Y/%m/%d %H:%M')
copy_files2core_plot_folders(dt=dt)
# - Now move the folder to webfiles
if mv2webfiles:
TNow = AC.time2datetime([gmtime()])[0]
pstr = "Started moving files for {} to webfiles @ {}"
print(pstr.format(dstr, TNow.strftime('%Y/%m/%d %H:%M')))
mv_plots2webfiles(dt=dt)
# - Now move the files to google drive
TNow = AC.time2datetime([gmtime()])[0]
pstr = "Started moving files for {} to google drive @ {}"
print(pstr.format(dstr, TNow.strftime('%Y/%m/%d %H:%M')))
# Move the files
mv_plots2google_drive(dt=dt, debug=debug)
# print that the job is finished.
TNow = AC.time2datetime([gmtime()])[0]
pstr = "Finished moving files for {} to google drive @ {}"
print(pstr.format(dstr, TNow.strftime('%Y/%m/%d %H:%M')))
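# Hedged sketch (illustrative, not from the original module): the default forecast time
# used above is "yesterday at 12:00 UTC", which can be reproduced with the standard library.
def _demo_default_forecast_time():
    now = datetime.datetime.utcnow()
    noon_today = datetime.datetime(now.year, now.month, now.day, 12)
    return noon_today - datetime.timedelta(days=1)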
def which_plot_folders_are_not_complete4dt(dt):
"""
Check which plots have been made for a specific datetime (dt)
"""
# Use yesterday's forecast at noon if others not available.
if isinstance(dt, type(None)):
Tnow = AC.time2datetime([gmtime()])[0]
# Get the 5-day forecast at noon...
dt = datetime.datetime(Tnow.year, Tnow.month, Tnow.day, 12, )
# Use yesterday
dt = AC.add_days(dt, -1)
# Get the root plot folder
folder = get_GEOS_data_folder4dt(
dt=dt, product='GEOS_5', inc_collection=False)
folder += '/plots/'
# Hardwire folder names to check
subfolders2check = [
'plots.GMAO',
'alt_slice',
'lon_slice',
'lat_slice',
'alt_slice.zoomed',
'alt_slice.individual'
]
if not os.path.isdir(folder):
return subfolders2check
else:
# Setup a dataframe to store values
df = | pd.DataFrame() | pandas.DataFrame |
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from scipy.signal import periodogram
from .misc import get_equivalent_days
import re
#%% plotting functions
def adjust_bright(color, amount=1.2):
"""
Adjust color brightness in plots for use.
Inputs
------
color: str | list,
color can be basic color string name or rgb list.
amount: float,
the level of brightness of the input color to be adjusted.
the higher the amount, the brighter the color is.
Returns
-------
color with brightness level adjusted.
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(
c[0], max(0, min(1, amount * c[1])), c[2])
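# Illustrative usage (assumed, not part of the original module): brighten the default
# seaborn blue for a highlighted line; the data values are arbitrary.
def _demo_adjust_bright():
    base = sns.color_palette("colorblind")[0]
    brighter = adjust_bright(base, amount=1.4)
    plt.plot([0, 1, 2], [1, 2, 3], color=base, label="base")
    plt.plot([0, 1, 2], [2, 3, 4], color=brighter, label="brightened")
    plt.legend()
    plt.show()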
def missingval_plot(df, figsize=(20,6), show=True):
"""
    Visualize the index locations of missing values for each feature.
Doesn't work for 1-dim df.
df: pd.DataFrame
"""
    # if the frame is not already a boolean missing-value mask, build one (features as rows)
if (df.dtypes != bool).any():
df = df.reset_index().T.isna()
f, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
g = sns.heatmap(df, cmap='Blues', cbar=True,
yticklabels=df.index.values)
# customize colorbar
colorbar = g.collections[0].colorbar
colorbar.set_ticks([0, 1])
colorbar.set_ticklabels(['non-missing', 'missing'])
# customize title
ax.set_title('Distribution of Missing Values', fontsize=16)
# customize font size in ticks
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(12)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(12)
if show:
plt.show()
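# Illustrative usage (assumed): a tiny frame with a few gaps shows how the heatmap
# marks missing entries per feature.
def _demo_missingval_plot():
    demo = pd.DataFrame({
        "a": [1.0, np.nan, 3.0, 4.0],
        "b": [np.nan, 2.0, np.nan, 4.0],
    })
    missingval_plot(demo, figsize=(6, 2))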
def plot_cv_indices(cv, X, y, ax, n_splits, lw=10):
"""
Create a sample plot for indices of a cross-validation object.
"""
# Generate the training/testing visualizations for each CV split
for ii, (tr, tt) in enumerate(cv.split(X=X, y=y)):
# Fill in indices with the training/test groups
indices = np.array([np.nan] * len(X))
indices[tt] = 1
indices[tr] = 0
# Visualize the results
ax.scatter(range(len(indices)), [ii + .5] * len(indices),
c=indices, marker='_', lw=lw,
cmap=plt.cm.coolwarm, vmin=-.2, vmax=1.2)
# Formatting
yticklabels = list(range(n_splits))
ax.set(yticks=np.arange(n_splits)+.5, yticklabels=yticklabels,
xlabel='Sample index', ylabel="CV iteration",
ylim=[n_splits+.2, -.2], xlim=[0, len(X)])
ax.set_title('{}'.format(type(cv).__name__), fontsize=15)
return ax
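# Illustrative usage (assumed): visualize the folds produced by a scikit-learn
# TimeSeriesSplit on 100 samples.
def _demo_plot_cv_indices():
    from sklearn.model_selection import TimeSeriesSplit
    X = np.arange(100).reshape(-1, 1)
    y = np.arange(100)
    cv = TimeSeriesSplit(n_splits=5)
    fig, ax = plt.subplots(figsize=(8, 3))
    plot_cv_indices(cv, X, y, ax, n_splits=5)
    plt.show()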
def corrtri_plot(df, figsize=(10,10)):
"""correlation plot of the dataframe"""
# sns.set() #: will cause plt warning later in lag_plot
c = df.corr()
mask = np.triu(c.corr(), k=1)
plt.figure(figsize=figsize)
plt.tick_params(axis='both', which='major', labelsize=10,
bottom=False, labelbottom=False,
top=False, labeltop=True)
g = sns.heatmap(c, annot=True, fmt='.1f', cmap='coolwarm',
square=True, mask=mask, linewidths=1, cbar=False)
plt.show()
def acpac_plot(data, features=[], figsize=(10,5)):
"""Autocorrelation and Partial-aurocorrelation plots."""
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
if features == []:
features = data.columns
for i, col in enumerate(features):
fig, ax = plt.subplots(1,2,figsize=figsize)
plot_acf(data[col], lags=30,
title='AC: ' + data[col].name,
ax=ax[0]) # missing='drop'
plot_pacf(data[col], lags=30,
title='PAC: ' + data[col].name,
ax=ax[1])
def residac_plot(model, cols=None, figsize=(16, 8), ylim=(-.3, .3)):
"""
model: var/vecm model (from statsmodels)
cols: can be integer/str list.
"""
# set up
if cols is not None:
cols = list(cols)
        assert len(cols) == pd.DataFrame(model.resid).shape[1], \
            "cols length does not match the number of model.resid columns."
else:
cols = list(model.names)
# make sure DataFrame type
resid = pd.DataFrame(model.resid)
if isinstance(model.resid, np.ndarray):
resid = | pd.DataFrame(resid, columns=cols) | pandas.DataFrame |
import numpy as np
import pandas as pd
from analysis.transform_fast import load_raw_cohort, transform
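# Note: the assertions below rely on null-safe date comparison helpers `gt` and `gte`,
# which are not shown in this excerpt. A minimal reconstruction consistent with how the
# business rules use them (a missing left-hand value never selects; a missing right-hand
# value is treated as the earliest possible date) might look like this -- treat the exact
# null-handling as an assumption.
def gt(lhs, rhs):
    if pd.isnull(lhs):
        return False
    if pd.isnull(rhs):
        return True
    return lhs > rhs


def gte(lhs, rhs):
    if pd.isnull(lhs):
        return False
    if pd.isnull(rhs):
        return True
    return lhs >= rhs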
def test_immuno_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMRX_DAT <> NULL | Select | Next
if pd.notnull(row["immrx_dat"]):
assert row["immuno_group"]
continue
# IF IMMDX_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["immdx_cov_dat"]):
assert row["immuno_group"]
else:
assert not row["immuno_group"]
def test_ckd_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CKD_COV_DAT <> NULL (diagnoses) | Select | Next
if pd.notnull(row["ckd_cov_dat"]):
assert row["ckd_group"]
continue
# IF CKD15_DAT = NULL (No stages) | Reject | Next
if pd.isnull(row["ckd15_dat"]):
assert not row["ckd_group"]
continue
# IF CKD35_DAT>=CKD15_DAT | Select | Reject
if gte(row["ckd35_dat"], row["ckd15_dat"]):
assert row["ckd_group"]
else:
assert not row["ckd_group"]
def test_ast_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF ASTADM_DAT <> NULL | Select | Next
if pd.notnull(row["astadm_dat"]):
assert row["ast_group"]
continue
# IF AST_DAT <> NULL | Next | Reject
if pd.isnull(row["ast_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM1 <> NULL | Next | Reject
if pd.isnull(row["astrxm1_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM2 <> NULL | Next | Reject
if pd.isnull(row["astrxm2_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM3 <> NULL | Select | Reject
if pd.notnull(row["astrxm3_dat"]):
assert row["ast_group"]
else:
assert not row["ast_group"]
def test_cns_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CNS_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["cns_cov_dat"]):
assert row["cns_group"]
else:
assert not row["cns_group"]
def test_resp_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF AST_GROUP <> NULL | Select | Next
if row["ast_group"]:
assert row["resp_group"]
continue
# IF RESP_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["resp_cov_dat"]):
assert row["resp_group"]
else:
assert not row["resp_group"]
def test_bmi_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_OBESITY_DAT > BMI_DAT | Select | Next
if gt(row["sev_obesity_dat"], row["bmi_dat"]):
assert row["bmi_group"]
continue
# IF BMI_VAL >=40 | Select | Reject
if gte(row["bmi_val"], 40):
assert row["bmi_group"]
else:
assert not row["bmi_group"]
def test_diab_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF DIAB_DAT > DMRES_DAT | Select | Reject
if gt(row["diab_dat"], row["dmres_dat"]):
assert row["diab_group"]
else:
assert not row["diab_group"]
def test_sevment_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_MENTAL_DAT > SMHRES_DAT | Select | Reject
if gt(row["sev_mental_dat"], row["smhres_dat"]):
assert row["sevment_group"]
else:
assert not row["sevment_group"]
def test_atrisk_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMUNOGROUP <> NULL | Select | Next
if row["immuno_group"]:
assert row["atrisk_group"]
continue
# IF CKD_GROUP <> NULL | Select | Next
if row["ckd_group"]:
assert row["atrisk_group"]
continue
# IF RESP_GROUP <> NULL | Select | Next
if row["resp_group"]:
assert row["atrisk_group"]
continue
# IF DIAB_GROUP <> NULL | Select | Next
if row["diab_group"]:
assert row["atrisk_group"]
continue
# IF CLD_DAT <>NULL | Select | Next
if pd.notnull(row["cld_dat"]):
assert row["atrisk_group"]
continue
# IF CNS_GROUP <> NULL | Select | Next
if row["cns_group"]:
assert row["atrisk_group"]
continue
# IF CHD_COV_DAT <> NULL | Select | Next
if pd.notnull(row["chd_cov_dat"]):
assert row["atrisk_group"]
continue
# IF SPLN_COV_DAT <> NULL | Select | Next
if pd.notnull(row["spln_cov_dat"]):
assert row["atrisk_group"]
continue
# IF LEARNDIS_DAT <> NULL | Select | Next
if pd.notnull(row["learndis_dat"]):
assert row["atrisk_group"]
continue
# IF SEVMENT_GROUP <> NULL | Select | Reject
if row["sevment_group"]:
assert row["atrisk_group"]
else:
assert not row["atrisk_group"]
def test_covax1d_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVRX1_DAT <> NULL | Select | Next
if pd.notnull(row["covrx1_dat"]):
assert row["covax1d_group"]
continue
# IF COVADM1_DAT <> NULL | Select | Reject
if pd.notnull(row["covadm1_dat"]):
assert row["covax1d_group"]
else:
assert not row["covax1d_group"]
def test_covax2d_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX1D_GROUP <> NULL | Next | Reject
if not row["covax1d_group"]:
assert not row["covax2d_group"]
continue
# IF COVRX2_DAT <> NULL | Select | Next
if pd.notnull(row["covrx2_dat"]):
assert row["covax2d_group"]
continue
# IF COVADM2_DAT <> NULL | Select | Reject
if pd.notnull(row["covadm2_dat"]):
assert row["covax2d_group"]
else:
assert not row["covax2d_group"]
def test_unstatvacc1_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX1D_GROUP <> NULL | Next | Reject
if not row["covax1d_group"]:
assert not row["unstatvacc1_group"]
continue
# IF AZD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["azd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF PFD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["pfd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF MOD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["mod1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF NXD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["nxd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF JND1RX _DAT <> NULL | Reject | Next
if pd.notnull(row["jnd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF GSD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["gsd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF VLD1RX_DAT <> NULL | Reject | Select
if pd.notnull(row["vld1rx_dat"]):
assert not row["unstatvacc1_group"]
else:
assert row["unstatvacc1_group"]
def test_unstatvacc2_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX2D_GROUP <> NULL | Next | Reject
if not row["covax2d_group"]:
assert not row["unstatvacc2_group"]
continue
# IF AZD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["azd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF PFD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["pfd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF MOD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["mod2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF NXD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["nxd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF JND2RX _DAT <> NULL | Reject | Next
if pd.notnull(row["jnd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF GSD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["gsd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF VLD2RX_DAT <> NULL | Reject | Select
if pd.notnull(row["vld2rx_dat"]):
assert not row["unstatvacc2_group"]
else:
assert row["unstatvacc2_group"]
def test_shield_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SHIELD_DAT = NULL | Reject | Next
if pd.isnull(row["shield_dat"]):
assert not row["shield_group"]
continue
# IF SHIELD_DAT <> NULL AND NONSHIELD_DAT = NULL | Select | Next
if ( | pd.notnull(row["shield_dat"]) | pandas.notnull |
import os
import math
import copy
import random
import calendar
import csv
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import sqlite3
import seaborn as sns
#from atnresilience import atn_analysis as atn
import atn_analysis
import db_tools
# Set global styles for plots
plt.rcParams["font.family"] = "Times New Roman"
sns.set_palette("colorblind")
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('ytick', labelsize=8)
line_type = {1:'-',2:'--',3:':',4:'-.'}
def remove_frequency(db_path, file, airline, include_data, can_limit, zs_limit, processed_direc):
"""
Creates a dictionary of airports and their removal frequency for a given airline
Parameters
----------
file: int
Year of selected data
airline: string
Airline to get data from
include_data: string
Type of airline data to query from csv
can_limit: int
Cancellation limit
zs_limit: int
The z-score limit
Returns
-------
Returns a dictionary containing airport removal frequency values
Notes
-----
"""
df_net_tuple = pd.DataFrame()
df_net = atn_analysis.raw_query(db_path, file, airline)
df_net_tuple["Origin"] = df_net.Origin_Airport_Code
df_net_tuple["Destination"] = df_net.Destination_Airport_Code
graph = [tuple(x) for x in df_net_tuple.to_records(index=False)]
G = nx.Graph()
G.add_edges_from(graph)
tempG = G.copy()
Airport_Dict = {}
for i in G.nodes():
Airport_Dict[i] = 0
Total_List = get_remove_list(db_path, file,include_data, airline, can_limit, zs_limit, processed_direc)
if int(file)%4 == 0:
total_day = 366
else:
total_day = 365
for j in range(total_day):
airport_list = Total_List[j]
for l in airport_list:
tempG.remove_node(l)
Airport_Dict[l] = Airport_Dict[l] + 1
tempG = G.copy()
return(Airport_Dict)
def weighted_edge(db_path, file, airline):
"""
Creates a data frame of origin airports, destination airports and weights for each route
Parameters
----------
file: int
Year of selected data
airline: string
Airline to get data from
include_data: string
Type of airline data to query from csv
can_limit: int
Cancellation limit
zs_limit: int
The z-score limit
Returns
-------
Returns a data frame containing each respective weighted route from an origin airport to a destination
Notes
-----
"""
df = atn_analysis.raw_query(db_path, file, airline)
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
file_str = int(str(file)[:4])
if calendar.isleap(file_str) == 1:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status
weight_values = [math.log(y, 10) for y in df_tuple.Weight.values]
for i in range(0, len(weight_values)):
df_tuple.Weight.values[i] = weight_values[i]
return(df_tuple)
def get_remove_list(db_path, file, include_data, airline, can_limit, zs_limit, processed_direc):
"""
    Return a remove list for a year (airline- and include_data-specific) based on the cancellation limit and z-score limit.
Parameters
----------
file: int
Year of selected data
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
airline: string
Airline to get data from. This is the 2 letter airline code (ex: AA, UA, DL, WN)
can_limit: float
Cancellation Limit. Between 0 and 1
zs_limit: float
z-score limit. Between 0 and 1
Returns
-------
    Nested list giving, for each day of the year, the airports to remove.
Notes
-----
"""
z_score_path = '%s%s_%s_Zdata_%s.csv'%(processed_direc, file,airline,include_data)
#df_score = pd.read_csv(raw_file_drop, index_col="Date")
df_score = pd.read_csv(z_score_path, index_col = "Day_of_Year")
df_score.index = pd.to_datetime(df_score.index)
airport_list = df_score.columns.tolist()
df = atn_analysis.raw_query(db_path,file,airline)
df = df[df['Origin_Airport_Code'].isin(airport_list)] # Filtering to make sure airports are equal in both directions
df = df[df['Destination_Airport_Code'].isin(airport_list)]
by_origin_count = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].count()
by_origin = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].sum()
by_origin.Can_Status = by_origin.Can_Status / by_origin_count.Can_Status
#print(by_origin)
df_score["idx"] = df_score.index
df_score = pd.melt(df_score, id_vars='idx', value_vars=airport_list)
df_score = df_score.sort_values(['idx', 'variable'], ascending=[True, True])
df_score.columns = ["Date", "Airports", "Z_Score"]
df_score.set_index('Date')
df_score["Cancellations"] = by_origin.Can_Status
    ### Create the OR condition: flag a day when either the cancellation fraction or the z-score exceeds its limit
df_score["Z_score_9901"] = np.where((df_score['Cancellations'] > can_limit) | (df_score['Z_Score'] > zs_limit), 1, 0)
#print(df_score)
### Creating pivot table for easy manipulation. This creates the date as the index with the properties corresponding to
### it and finally repeats this trend for all airports being considered.
df_pivot = df_score.pivot_table('Z_score_9901', ['Date'], 'Airports')
#print(df_pivot)
s = np.asarray(np.where(df_pivot == 1, ['{}'.format(x) for x in df_pivot.columns], '')).tolist()
s_nested = []
for k in s:
p = list(filter(None,k))
#p = filter(None,k)
s_nested.append(p)
#s_nested.extend(p)
return s_nested
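# Illustrative sketch (threshold values are placeholders): the day-flagging rule above
# marks an airport-day when either the cancellation fraction or the z-score exceeds its limit.
def _demo_flagging_rule(can_limit=0.3, zs_limit=2.5):
    demo = pd.DataFrame({
        "Cancellations": [0.05, 0.40, 0.10],
        "Z_Score": [0.2, 1.0, 3.1],
    })
    demo["Z_score_9901"] = np.where(
        (demo["Cancellations"] > can_limit) | (demo["Z_Score"] > zs_limit), 1, 0)
    return demo  # the second and third rows are flagged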
def inv_average_shortest_path_length(graph, weight=None):
"""
Creates an unweight inverse average path length graph
Parameters
----------
graph: python graph object
weight: default
Returns
-------
Returns the IAPL unweighted graph
Notes
-----
"""
avg = 0.0
if weight is None:
for node in graph:
avg_path_length = nx.single_source_shortest_path_length(graph, node) # get the shortest path lengths from source to all reachable nodes (unweighted)
del avg_path_length[node] # Deletes source node from the list to avoid division by 0
inv_avg_path_length = copy.deepcopy(avg_path_length)
inv_avg_path_length.update((x, 1/y) for x, y in avg_path_length.items())
avg += sum(inv_avg_path_length.values())
n = len(graph)
if n == 1 or n == 0:
return 0
else:
return avg/(n*(n-1))
def inv_average_shortest_path_length_W(graph, weight=None):
"""
    Compute the inverse average shortest path length (IAPL) of a weighted graph.
Parameters
----------
graph: python graph object
weight: default
Returns
-------
    The weighted IAPL value (float).
Notes
-----
"""
avg = 0.0
if weight is None:
for node in graph:
avg_path_length = nx.single_source_dijkstra_path_length(graph, node) # get the shortest path lengths from source to all reachable nodes (weighted)
del avg_path_length[node] # Deletes source node from the list to avoid division by 0
inv_avg_path_length = copy.deepcopy(avg_path_length)
inv_avg_path_length.update((x, 1/y) for x, y in avg_path_length.items())
avg += sum(inv_avg_path_length.values())
n = len(graph)
if n == 1 or n == 0:
return 0
else:
return avg/(n*(n-1))
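# Illustrative check (assumed example, not from the original module): on a small weighted
# triangle the weighted IAPL can be verified by hand against Dijkstra distances
# (e.g. d(A, C) = 3 via B, so the A row contributes 1/1 + 1/3 to the sum).
def _demo_iapl():
    G = nx.Graph()
    G.add_weighted_edges_from([("A", "B", 1.0), ("B", "C", 2.0), ("A", "C", 4.0)])
    print(inv_average_shortest_path_length(G))    # unweighted (hop-count) variant
    print(inv_average_shortest_path_length_W(G))  # weighted variant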
def Data_Driven_W(file_list, airline_list, include_data, can_limit, zs_limit, processed_direc, graph_direc):
"""
Calculate the cluster size and IAPL for each day in a year after removal based on data-driven method.
Parameters
----------
file_list: list
List contaning years to process
airline_list: list
List contaning airlines to process
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
can_limit: float
Cancellation threshold
zs_limit: float
z-score threshold
Returns
-------
The cluster size and IAPL for each day of the year after removal based on data-driven method.
Notes
-----
"""
for file in file_list:
## iteration of years first
figure_num = 1
CSV_df = pd.DataFrame(columns = airline_list)
for airline in airline_list:
# CSV_df[airline] = [1,2,3,4]
# CSV_file = "%s_DD_IAPL.csv" %(file)
# CSV_df.to_csv(CSV_file, index=False)
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE) (Weighted Graph)
df = pd.DataFrame()
db_path = os.path.join(script_dir, db_local_path)
fields = ["Origin_Airport_Code", "Destination_Airport_Code", "Can_Status"]
df_net = atn_analysis.raw_query(db_path,file,airline)
df["Origin_Airport_Code"] = df_net.Origin_Airport_Code
df["Destination_Airport_Code"] = df_net.Destination_Airport_Code
df["Can_Status"] = df_net.Can_Status
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
#print(df)
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
if int(file)%4 == 0:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status/days
df_tuple.Weight = 1/df_tuple.Weight
## Output lists initialization:
#day_IAPL = 0
day_CS = 0
#output_IAPL = []
output_CS = []
NoD = []
## Graph object initialization
graph = [tuple(x) for x in df_tuple.to_records(index=False)]
G = nx.Graph()
## Set up the weighted graph
G.add_weighted_edges_from(graph)
#print(G.nodes())
tempG = G.copy() #use temporary graph for the loop
## Remove list for the whole year
Total_Remove_List = get_remove_list(db_path,file,include_data, airline, can_limit, zs_limit,processed_direc)
if int(file)%4 == 0:
total_day = 366
else:
total_day = 365
for j in range(total_day):
## Remove the nodes in each day and get the CS and IAPL data
#day_IAPL = 0
Day_Remove_List = Total_Remove_List[j]
NoD.append(j)
for l in Day_Remove_List:
tempG.remove_node(l)
#largest_component_b = max(nx.connected_components(tempG), key=len)
#day_IAPL =(inv_average_shortest_path_length_W(tempG))
largest_component_b = max(nx.connected_components(tempG), key=len)
day_CS = len(largest_component_b)
#len(largest_component_b) = cluster size
#cluster fraction = cluster size/number of nodes
#output_IAPL.append(day_IAPL)
output_CS.append(day_CS)
#sum_IAPL = sum_IAPL + (inv_average_shortest_path_length(tempG))
tempG = G.copy()
## plotting command
plt.figure(figure_num)
#line = plt.plot(NoD,output_IAPL, label="{}".format(airline))
line = plt.plot(NoD,output_CS, label="{}".format(airline))
plt.legend()
#CSV_df[airline] = output_IAPL
CSV_df[airline] = output_CS
#CSV_file = "%s_DD_IAPL.csv" %(file)
CSV_file = "%s%s_DD_CS.csv" %(graph_direc,file)
CSV_df.to_csv(CSV_file, index=False)
#plt.title("{} Data Driven IAPL".format(str(file)))
plt.xlabel("Day")
#plt.ylabel("IAPL")
plt.ylabel("Cluster Size")
#plt.savefig("{}_Data_Driven_IAPL.png".format(str(file)))
plt.savefig("%s%s_Data_Driven_CS.png"%(graph_direc,file))
plt.show()
figure_num = figure_num + 1
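# Hedged example invocation (year, carriers, thresholds, and directories are placeholders,
# not values from the original study): computes the data-driven cluster-size curves.
def _demo_data_driven():
    Data_Driven_W(file_list=[2015],
                  airline_list=["AA", "DL"],
                  include_data="CC",
                  can_limit=0.3,
                  zs_limit=2.5,
                  processed_direc="data/processed/",
                  graph_direc="data/graphs/")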
def Pure_Graph_W_Shu(file_list, airline_list, include_data, processed_direc, rep_num):
"""
Calculate the linear algebraic connectivity, cluster size and IAPL for each day in a year after random removal based on Pure Graph method.
Random Removal set up by shuffle function
Parameters
----------
file_list: list
List contaning years to process
airline_list: list
List contaning airlines to process
include_data: string
Specify what kind of data to include in processed flight data. See drop_flights in M-D File. Possible parameters are:
CC: Cancellations only
ADD: Arrival delays including diversions
ADM: Purely arrival delays excluding cancellations or diversions
DCC: Combined delay. If arrival delay is greater than a set threshold, the flight is considered cancelled
DD: Departure delays. Does not include cancelled or diverted flights.
rep_num: int
        Number of repetitions of the random-removal experiment
Returns
-------
    CSV files with the algebraic connectivity, IAPL, and cluster size per year after random removal.
Notes
-----
"""
for airline in airline_list:
rep_ite = 1
Total_AC = []
Total_Cluster_Size = []
Total_IAPL = []
for i in range(len(file_list)):
## initialize the output lists
Total_AC.append(0)
Total_Cluster_Size.append(0)
Total_IAPL.append(0)
## Save the data in csv
filename1 = "%s%s_ACR.csv" %(processed_direc,airline)
with open(filename1, 'w') as myfile1:
wr1 = csv.writer(myfile1, quoting=csv.QUOTE_ALL)
wr1.writerow(file_list)
filename2 = "%s%s_IAPLR.csv" %(processed_direc,airline)
with open(filename2, 'w') as myfile2:
wr2 = csv.writer(myfile2, quoting=csv.QUOTE_ALL)
wr2.writerow(file_list)
filename3 = "%s%s_CSR.csv" %(processed_direc,airline)
with open(filename3, 'w') as myfile3:
wr3 = csv.writer(myfile3, quoting=csv.QUOTE_ALL)
wr3.writerow(file_list)
while rep_ite < rep_num+1:
## start the reptition
year_IAPL = []
year_Cluster_Size = []
year_AC = []
for file in file_list:
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE)
df = pd.DataFrame()
db_path = os.path.join(script_dir, db_local_path)
fields = ["Origin_Airport_Code", "Destination_Airport_Code", "Can_Status"]
#df_net = pd.read_csv(comb_file, usecols=fields)
df_net = atn_analysis.raw_query(db_path,file,airline)
df["Origin_Airport_Code"] = df_net.Origin_Airport_Code
df["Destination_Airport_Code"] = df_net.Destination_Airport_Code
df["Can_Status"] = df_net.Can_Status
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
#print(df)
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
if int(file)%4 == 0:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status/days
df_tuple.Weight = 1/df_tuple.Weight
## Output lists initialization:
## Graph object initialization
graph = [tuple(x) for x in df_tuple.to_records(index=False)]
G = nx.Graph()
G.add_weighted_edges_from(graph)
NodeNum = G.number_of_nodes()
#print('Weighted Alebraic Connectivity: ', nx.algebraic_connectivity(G))
year_AC.append(nx.algebraic_connectivity(G))
sum_IAPL = 0
sum_Cluster_Size = 0
IAPL_list = []
Cluster_Size_list = []
Remove_List = []
for node in G.nodes():
## Get the list of the airports
Remove_List.append(node)
## Shuffle the lists
random.shuffle(Remove_List)
for l in Remove_List:
G.remove_node(l)
if len(G.nodes()) != 0:
## Add up the data after removing each node
largest_component_b = max(nx.connected_components(G), key=len)
IAPL_list.append(inv_average_shortest_path_length_W(G))
Cluster_Size_list.append(len(largest_component_b)/NodeNum)
sum_IAPL = sum_IAPL + (inv_average_shortest_path_length_W(G))
sum_Cluster_Size = sum_Cluster_Size + len(largest_component_b)/NodeNum
## Save the data of the year
year_IAPL.append(sum_IAPL)
year_Cluster_Size.append(sum_Cluster_Size)
with open(filename1, 'a') as myfile1:
wr1 = csv.writer(myfile1, quoting=csv.QUOTE_ALL)
wr1.writerow(year_AC)
with open(filename2, 'a') as myfile2:
wr2 = csv.writer(myfile2, quoting=csv.QUOTE_ALL)
wr2.writerow(year_IAPL)
with open(filename3, 'a') as myfile3:
wr3 = csv.writer(myfile3, quoting=csv.QUOTE_ALL)
wr3.writerow(year_Cluster_Size)
# print('Unweighted Summation of IAPL: ', sum_IAPL)
# print('Unweighted Summation of Cluster Size: ', sum_Cluster_Size)
# print('Unweighted IAPL list', IAPL_list)
for i in range(len(file_list)):
## Get the sum for the average
Total_AC[i] = Total_AC[i] + year_AC[i]
                Total_IAPL[i] = Total_IAPL[i] + year_IAPL[i]
Total_Cluster_Size[i] = Total_Cluster_Size[i] + year_Cluster_Size[i]
rep_ite = rep_ite + 1
for i in range(len(file_list)):
## Get the average
Total_AC[i] = Total_AC[i]/rep_num
Total_IAPL[i] = Total_IAPL[i]/rep_num
Total_Cluster_Size[i] = Total_Cluster_Size[i]/rep_num
## Plotting Command:
plt.figure(num=1,figsize=(2.8,2.0),dpi=300)
# line1 = plt.plot(file_list,Total_IAPL, label="{}".format(airline))
plt.plot(file_list,Total_IAPL, label="{}".format(airline))
plt.legend()
plt.figure(num=2,figsize=(2.8,2.0),dpi=300)
# line2 = plt.plot(file_list,Total_Cluster_Size, label="{}".format(airline))
plt.plot(file_list,Total_Cluster_Size, label="{}".format(airline))
plt.legend()
plt.figure(num=3,figsize=(2.8,2.0),dpi=300)
# line3 = plt.plot(file_list,Total_AC, label="{}".format(airline))
plt.plot(file_list,Total_AC, label="{}".format(airline))
plt.legend()
plt.figure(1)
plt.title("IAPL (Random)")
plt.xlabel("Year")
plt.ylabel("IAPL")
plt.savefig("Pure_Graph_IAPLR.png")
plt.figure(2)
plt.title("Cluster Size (Random)")
plt.xlabel("Year")
plt.ylabel("Cluster Size")
plt.savefig("Pure_Graph_CSR.png")
plt.figure(3)
plt.title("Algebraic Connectivity (Random)")
plt.xlabel("Year")
plt.ylabel("Algebraic Connectivity")
plt.savefig("Pure_Graph_ACR.png")
plt.show()
def Pure_Graph_W_Tar(file_list,airline_list,processed_direc,graph_direc):
"""
    Calculate the algebraic connectivity, cluster size, and IAPL per year after targeted node removal (Pure Graph method).
    Targeted removal is ordered by node degree (highest-degree node removed first), with degree computed on the flight-frequency-weighted graph.
Parameters
----------
file_list: list
List contaning years to process
airline_list: list
List contaning airlines to process
Returns
-------
    Saves graphs and CSV files summarizing the targeted-removal results.
Notes
-----
"""
line_type_iter = 0
for airline in airline_list:
line_type_iter += 1
year_IAPL = []
year_Cluster_Size = []
year_AC = []
for file in file_list:
## Get the directory path
script_dir = os.path.dirname(os.getcwd())
#comb_path = "data/processed/%s_%s_combined.csv" % (file,airline)
db_local_path = "data/processed/atn_db.sqlite"
## df set up from Keshav (NO CHANGE)
df = pd.DataFrame()
db_path = os.path.join(script_dir, db_local_path)
fields = ["Origin_Airport_Code", "Destination_Airport_Code", "Can_Status"]
#df_net = pd.read_csv(comb_file, usecols=fields)
df_net = atn_analysis.raw_query(db_path,file,airline)
df["Origin_Airport_Code"] = df_net.Origin_Airport_Code
df["Destination_Airport_Code"] = df_net.Destination_Airport_Code
df["Can_Status"] = df_net.Can_Status
by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()
airport_list = by_origin.index.tolist()
df = df[df['Destination_Airport_Code'].isin(airport_list)]
#print(df)
df_tuple = pd.DataFrame()
df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()
df_tuple["Origin"] = df_weighted.Origin_Airport_Code
df_tuple["Destination"] = df_weighted.Destination_Airport_Code
if int(file)%4 == 0:
days = 366
else:
days = 365
df_tuple["Weight"] = df_weighted.Can_Status/days
## Graph object initialization
graph = [tuple(x) for x in df_tuple.to_records(index=False)]
G1 = nx.Graph()
G1.add_weighted_edges_from(graph)
#print('Weighted Alebraic Connectivity: ', nx.algebraic_connectivity(G))
sum_IAPL = 0
sum_Cluster_Size = 0
IAPL_list = []
Cluster_Size_list = []
Remove_List = []
tempG = G1.copy()
# return(tempG.nodes())
## Get the remove list based on the node degree
while list(tempG.nodes()) != []:
# print('run')
MaxNode = list(tempG.nodes())[0]
MaxDegree = tempG.degree(MaxNode)
for node in tempG.nodes():
if tempG.degree(node) >= MaxDegree:
MaxNode = node
MaxDegree = tempG.degree(node)
tempG.remove_node(MaxNode)
Remove_List.append(MaxNode)
#print('Ordered List: ', Remove_List)
df_tuple.Weight = 1/df_tuple.Weight
graph = [tuple(x) for x in df_tuple.to_records(index=False)]
G = nx.Graph()
G.add_weighted_edges_from(graph)
year_AC.append(nx.algebraic_connectivity(G))
#print('Weighted Alebraic Connectivity: ', nx.algebraic_connectivity(G))
NodeNum = G.number_of_nodes()
## add on the data after every removal
for l in Remove_List:
G.remove_node(l)
if list(G.nodes()) != []:
largest_component_b = max(nx.connected_components(G), key=len)
IAPL_list.append(inv_average_shortest_path_length_W(G))
Cluster_Size_list.append((len(largest_component_b))/NodeNum)
sum_IAPL = sum_IAPL + (inv_average_shortest_path_length_W(G))
sum_Cluster_Size = sum_Cluster_Size + (len(largest_component_b))/NodeNum
year_IAPL.append(sum_IAPL)
year_Cluster_Size.append(sum_Cluster_Size)
plt.figure(1,figsize=(2.8,2.0),dpi=300)
line1 = plt.plot(file_list,year_IAPL, label="{}".format(airline),linestyle=line_type[line_type_iter], marker = 'o')
plt.xticks(file_list)
plt.xlabel('Year',fontsize=10)
plt.ylabel('IAPL',fontsize=10)
plt.legend()
# fig1, ax1 = plt.subplots(figsize=(2.8,1.8))
# sns.lineplot(ax=ax1,x=file_list,y=year_IAPL,label="{}".format(airline))
# ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
plt.figure(2,figsize=(2.8,2.0),dpi=300)
line2 = plt.plot(file_list,year_Cluster_Size, label="{}".format(airline),linestyle=line_type[line_type_iter], marker = 'o')
plt.xticks(file_list)
plt.xlabel('Year',fontsize=10)
plt.ylabel('Cluster Size',fontsize=10)
plt.legend()
# fig2, ax2 = plt.subplots(figsize=(2.8,1.8))
# sns.lineplot(ax=ax2,x=file_list,y=year_Cluster_Size,label="{}".format(airline))
# ax2.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
plt.figure(3,figsize=(2.8,2.0),dpi=300)
line3 = plt.plot(file_list,year_AC, label="{}".format(airline),linestyle=line_type[line_type_iter], marker = 'o')
plt.xticks(file_list)
plt.xlabel('Year',fontsize=10)
plt.ylabel('Algebraic Connectivity',fontsize=10)
plt.legend()
## Save the data
filename1 = "%s%s_ACT.csv" %(processed_direc,airline)
with open(filename1, 'w') as myfile1:
wr1 = csv.writer(myfile1, quoting=csv.QUOTE_ALL)
wr1.writerow(file_list)
wr1.writerow(year_AC)
filename2 = "%s%s_IAPLT.csv" %(processed_direc,airline)
with open(filename2, 'w') as myfile2:
wr2 = csv.writer(myfile2, quoting=csv.QUOTE_ALL)
wr2.writerow(file_list)
wr2.writerow(year_IAPL)
filename3 = "%s%s_CST.csv" %(processed_direc,airline)
with open(filename3, 'w') as myfile3:
wr3 = csv.writer(myfile3, quoting=csv.QUOTE_ALL)
wr3.writerow(file_list)
wr3.writerow(year_Cluster_Size)
plt.figure(1)
#plt.title("IAPL (Target)")
# plt.xlabel("Year")
# plt.ylabel("IAPL")
IAPLT_path = "%starget_IAPL.pdf"%(graph_direc,)
plt.tight_layout()
plt.legend(labelspacing=0.15,fontsize=10)
plt.savefig(IAPLT_path)
print('Targeted IAPL graph saved to %s'%(IAPLT_path,))
plt.figure(2)
#plt.title("Cluster Size (Target)")
# plt.xlabel("Year")
# plt.ylabel("Cluster Size")
CST_path = "%starget_CST.pdf"%(graph_direc,)
plt.tight_layout()
plt.legend(labelspacing=0.15,fontsize=10)
plt.savefig(CST_path)
print('Targeted CS graph saved to %s'%(CST_path,))
plt.figure(3)
#plt.title("Algebraic Connectivity (Target)")
# plt.xlabel("Year")
# plt.ylabel("Algebraic Connectivity")
ACT_path = "%starget_ACT.pdf"%(graph_direc,)
plt.tight_layout()
plt.legend(labelspacing=0.15,fontsize=10)
plt.savefig(ACT_path)
print('Targeted AC graph saved to %s'%(ACT_path,))
# plt.show()
def ran_remove_shaded(year_list,airline_list,processed_direc,graph_direc):
"""
Creates a the shaded lineplot for the random removals - cluster size, algebraic connecticity, IAPL.
Parameters
----------
year_list: list
        List of years to plot
airline_list: list
List of airlines to plot
Returns
-------
Saves the plots to the /data/graphs folder in pdf format.
Notes
-----
"""
IAPL_df_all = pd.DataFrame(columns = ['Year','Airline','IAPL'])
CS_df_all = pd.DataFrame(columns = ['Year','Airline','Cluster_Size'])
AC_df_all = pd.DataFrame(columns = ['Year','Airline','AC'])
for airline in airline_list:
script_dir = os.path.dirname(os.getcwd())
CS_path = "%s%s_CSR.csv" %(processed_direc,airline)
CS_file = os.path.join(script_dir,CS_path)
CS_df = pd.read_csv(CS_file)
IAPL_path = "%s%s_IAPLR.csv" %(processed_direc,airline)
IAPL_file = os.path.join(script_dir,IAPL_path)
IAPL_df = pd.read_csv(IAPL_file)
AC_path = "%s%s_ACR.csv" %(processed_direc,airline)
AC_file = os.path.join(script_dir,AC_path)
AC_df = pd.read_csv(AC_file)
CS_df_airline = pd.DataFrame(columns = ['Year','Airline','Cluster_Size'])
CS_year_df = pd.DataFrame()
IAPL_df_airline = pd.DataFrame(columns = ['Year','Airline','IAPL'])
IAPL_year_df = pd.DataFrame()
AC_df_airline = pd.DataFrame(columns = ['Year','Airline','AC'])
AC_year_df = pd.DataFrame()
col = 0
for year in year_list:
CS_year_df['Cluster_Size'] = CS_df.iloc[:,col]
CS_quant_calc = CS_year_df.quantile([0.25,0.5,0.75])
CS_quant_calc['Year'] = year
CS_df_airline = pd.concat([CS_df_airline,CS_quant_calc],ignore_index=True)
IAPL_year_df['IAPL'] = IAPL_df.iloc[:,col]
IAPL_quant_calc = IAPL_year_df.quantile([0.25,0.5,0.75])
IAPL_quant_calc['Year'] = year
IAPL_df_airline = pd.concat([IAPL_df_airline,IAPL_quant_calc],ignore_index=True)
AC_year_df['AC'] = AC_df.iloc[:,col]
AC_quant_calc = AC_year_df.quantile([0.5,0.5,0.5])
AC_quant_calc['Year'] = year
AC_df_airline = pd.concat([AC_df_airline,AC_quant_calc],ignore_index=True)
col = col + 1
CS_df_airline['Airline'] = airline
CS_df_all = pd.concat([CS_df_all,CS_df_airline],ignore_index = True)
IAPL_df_airline['Airline'] = airline
IAPL_df_all = | pd.concat([IAPL_df_all,IAPL_df_airline],ignore_index = True) | pandas.concat |
import pandas as pd
from pandas import HDFStore
import numpy as np
import subprocess
import io
import matplotlib.pyplot as plt
import gc
import os
from scipy.stats import ks_2samp
from functools import lru_cache
'''
Analyze wsprspots logs (prepared by WSPRLog2Pandas)
All manipulations are performed against an HDF5 store
with all reports in the "norm" dataset and the image
reports in the 'img' dataset.
'''
# utility functions
def absRangeMask(ser, min, max):
return (abs(ser) < min) | (abs(ser) > max)
def findrange(v):
for expd in range(-16,16):
for mant in [1, 2, 5]:
lim = mant * (10 ** expd)
#print("test v = %g lim = %g\n" % (v, lim))
if v < lim:
return lim
return v
# numeric filter functions
def identityFunc(v):
return v
def roundAZ(val, min, interval):
rv = roundInterval(val, min, interval)
rv[rv == 360] = 0
return rv
def roundInterval(val, min, interval):
'''
Round val to the nearest value that is min + N*interval
'''
Nf = (val - min)/interval
N = np.round(Nf)
return min + N * interval
def truncInterval(val, min, interval):
Nf = (val - min)/interval
N = np.trunc(Nf)
return min + N * interval
@lru_cache(maxsize=64)
def lamInterval(func, min, interval):
return lambda x : func(x, min, interval)
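# Illustrative check (assumed example): roundInterval snaps values to the nearest grid
# point min + N*interval, roundAZ additionally wraps 360 back to 0 for azimuth bins,
# and truncInterval rounds down to the grid point below.
def _demo_rounding():
    vals = np.array([3.0, 7.9, 359.0])
    print(roundInterval(vals, 0, 5))   # [  5.  10. 360.]
    print(roundAZ(vals, 0, 5))         # [  5.  10.   0.]
    print(truncInterval(vals, 0, 5))   # [  0.   5. 355.]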
class WSPRImg:
def __init__(self, store_name, file_list=None, exp_name='', use_hdf5_store=False):
# init these sets as empty -- they're used as filters
# in the get chunks method.
self.bad_rx_calls = set()
self.bad_txrx_pairs = set()
self.exp_name = exp_name;
if (file_list == None) or (use_hdf5_store and os.path.isfile(store_name)):
print("Using HDF5 Store")
self.store = pd.HDFStore(store_name, mode='r', complib='zlib', complevel=9)
print("Got here to build exc list")
self.buildExclusionLists()
else:
self.readFilesToHDF5(file_list, store_name)
self.store.info
def __del__(self):
self.store.close()
del self.bad_rx_calls
del self.bad_txrx_pairs
gc.collect()
def readFilesToHDF5(self, flist, store_name):
'''
Read a list of files into an HDF5 store.
        Read the input in chunks to keep memory use to a minimum.
The store will be written to two datasets in the hdf5 file.
'norm' is the set of all reports, and 'img' is the set of all
image reports.
On the way in, we create a new column "TXRX" with the catenation
of the TXCALL and RXCALL values.
'''
col_types = {'RXSOL':float, 'TXSOL':float, 'MIDSOL':float,
'SPOT':int, 'DTIME':int, 'DIFFSNR':float,
'RXCALL':str, 'TXCALL':str, 'RXGRID':str, 'TXGRID':str,
'REFSNR':float, 'FREQ':float, 'POW':float, 'DRIFT':float,
'DIST':float, 'AZ':float, 'BAND':str, 'VER':str, 'CODE':int,
'FREQDIFF':float}
item_sizes = {'RXCALL': 12, 'TXCALL':12, 'RXGRID':8, 'TXGRID':8,
# 'VER': 20, # 'BAND': 20, # 'CODE': 20,
'TXRX':24}
col_names = list(col_types.keys())
col_names.append('TXRX')
csize = 1024 * 1024 # read 1M records at a time.
# create the datastore
#self.store = pd.HDFStore(store_name, complib='zlib', complevel=9, columns=col_names, format='table')
# we accumulate bad lists along the way, and delete suspect contacts
#self.store = pd.HDFStore(store_name, mode='w', complib='zlib', complevel=9, format='table')
#self.store = pd.HDFStore(store_name, mode='w', complib='blosc:lz4', complevel=9, format='table')
self.store = pd.HDFStore(store_name, mode='w', complib='bzip2', complevel=9, format='table')
rcount = 0
for fn in flist:
for chunk in pd.read_csv(fn, dtype=col_types, chunksize=csize):
chunk['TXRX'] = chunk['TXCALL'].str.cat(chunk['RXCALL'])
chunk = chunk.drop(['BAND','VER','CODE','SPOT'],axis=1)
# create the image frame
img_chunk = self.filterLineFreqs(self.getImageFrame(chunk))
# now accumulate the bad calls
self.buildExclusionListOTF(img_chunk)
for col in ['RXSOL','TXSOL','MIDSOL','DIFFSNR','REFSNR','POW','DRIFT','AZ','FREQDIFF']:
chunk[col] = chunk[col].astype(np.float32)
# remove them from the chunk
chunk2 = chunk[~(chunk.RXCALL.isin(self.bad_rx_calls) | chunk.TXRX.isin(self.bad_txrx_pairs))]
# save all reports in the norm table
self.store.append('norm', chunk2, data_columns=True, min_itemsize = item_sizes)
# save image reports in the image table
self.store.append('img', self.filterLineFreqs(self.getImageFrame(chunk2)),
data_columns=True, min_itemsize = item_sizes)
del chunk
del chunk2
del img_chunk
print("%d\n" % rcount, end='')
rcount = rcount + csize
gc.collect()
return
def getImageFrame(self, fr):
return fr[fr.FREQDIFF != 0]
def filterLineFreqs(self, fr):
# return an all true mask
msk = fr.FREQDIFF < 1e19
for min,max in ((58,62), (48,52)):
for mul in (1, 2, 3):
lmin = min * mul
lmax = max * mul
nmsk = absRangeMask(fr.FREQDIFF, lmin, lmax)
msk = msk & nmsk
return fr[msk]
def buildExclusionListOTF(self, chunk):
'''
Build the exclusion list as we read it. (on the fly)
'''
# build the series from the value counts in a chunk
# then turn the series into a set (s = set(ser.unique())
# isin can test against a set (!)
# merge the sets as s1.union(s2) (does not modify either set...) or s1 | s2
tmp_rx_counts = chunk['RXCALL'].value_counts()
tmp_txrx_counts = chunk['TXRX'].value_counts()
srx = set(tmp_rx_counts[tmp_rx_counts > 4].index.to_series().unique())
stxrx = set(tmp_txrx_counts[tmp_txrx_counts > 3].index.to_series().unique())
self.bad_rx_calls = self.bad_rx_calls.union(srx)
self.bad_txrx_pairs = self.bad_txrx_pairs.union(stxrx)
print("bad_rx_calls len = %d bad_tx_pairs len = %d\n" % (len(self.bad_rx_calls), len(self.bad_txrx_pairs)))
def buildExclusionLists(self):
'''
When we process blocks of records, we need to skip records that
may be suspect in origin. These include records from RX stations
        that report more than 4 image events, and TX/RX pairs that report
        more than 3 events (the thresholds used in the code below).
These lists are called bad_rx_calls and bad_txrx_pairs
'''
# iterate through all the records in blocks of 100K rows
# we only need to scan the image list.
tmp_rx_counts = pd.Series([])
tmp_txrx_counts = pd.Series([])
chunksize = 100000
nrows = self.getDataSetSize('img')
for beg in range(0, nrows, chunksize):
chunk = self.getChunk('img', beg, chunksize);
tmp_rx_counts = tmp_rx_counts.append(chunk['RXCALL'].value_counts())
tmp_txrx_counts = tmp_txrx_counts.append(chunk['TXRX'].value_counts())
tmp_rx_counts = tmp_rx_counts.groupby(tmp_rx_counts.index).sum()
tmp_txrx_counts = tmp_txrx_counts.groupby(tmp_txrx_counts.index).sum()
self.bad_rx_calls = set(tmp_rx_counts[tmp_rx_counts > 4].index.to_series().unique())
self.bad_txrx_pairs = set(tmp_txrx_counts[tmp_txrx_counts > 3].index.to_series().unique())
def badFreqDiff(self, fdiff):
m50 = fdiff % 50
m60 = fdiff % 60
return (fdiff != 0) & ((m50 > 47) | (m50 < 3) | (m60 > 57) | (m60 < 3))
def getDataSetSize(self, data_set_name):
return self.store.get_storer(data_set_name).nrows
@lru_cache(maxsize=16)
def getChunk(self, data_set_name, beg, length):
'''
Get a chunk of (data_set)from the HDF store, filter out reports with the bad lists.
'''
chunk = self.store.select(key=data_set_name, start=beg, stop=beg+length-1)
return chunk[~(chunk.RXCALL.isin(self.bad_rx_calls) | chunk.TXRX.isin(self.bad_txrx_pairs) | self.badFreqDiff(chunk.FREQDIFF))]
@lru_cache(maxsize=16)
def storeValueCount(self, data_set_name, column, binfunc = identityFunc, sort=True):
'''
Read the store in chunks and do a value count.
We could do this all in one swoop, but it will allocate wayyyy too much memory
for large datasets.
So we read this in chunks as per a useful stack overflow suggestion
https://stackoverflow.com/questions/26019834/value-counts-of-a-database-chunk-by-chunk-using-pandas
For tables where the index is a category, the bin function should return its sole argument.
'''
nrows = self.getDataSetSize(data_set_name)
chunksize = 500000
temp = | pd.Series([]) | pandas.Series |
import cv2
import os
import pandas as pd
import pickle
import random
import zipfile
from ml.repository import TextDataset, ClassificationDataset
from ml.utils import LogMixin
from ml.utils.io import download_url
class BBCNews(LogMixin):
"""Internal class to handle the download, unpack and merging of the bbc
news dataset."""
ZIP_NAME = 'bbc.zip'
EXTRACTED_FOLDER_NAME = 'bbc'
CSV_FILE_NAME = 'bbc-news.csv'
FEATURE_TEXT_LABEL = 'text'
TARGET_LABEL = 'target'
CATS = ['business', 'entertainment', 'politics', 'sport', 'tech']
def __init__(self, base_path: str):
self.base_path = str(base_path)
@staticmethod
def _process_files(path, category):
texts = []
folder = os.path.join(path, category)
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
with open(file_path, 'r', encoding="utf-8", errors="replace") as fhandle:
text = fhandle.read()
texts.append((text, category))
return texts
def retrieve(self) -> TextDataset:
"""Retrieve the bbc news dataset.
If necessary it will download, unpack and merge the dataset.
        Returns a TextDataset wrapping a DataFrame that contains the text and
        the target, plus the names of the text column and the target (y) column."""
# Download file
path = self.base_path
url = 'http://mlg.ucd.ie/files/datasets/bbc-fulltext.zip'
zip_file = os.path.join(path, self.ZIP_NAME)
if not os.path.exists(zip_file):
os.makedirs(path, exist_ok=True)
self._logger.info("Downloading '%s'", url)
download_url(url, zip_file)
else:
self._logger.info("'%s' already exists. Skipping download", zip_file)
# Unzip file
extract_target = os.path.join(path, self.EXTRACTED_FOLDER_NAME)
if not os.path.exists(extract_target):
self._logger.info("Unzipping '%s' to '%s'", zip_file, path)
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
zip_ref.extractall(path)
else:
self._logger.info("'%s' already exists. Skipping unzip", extract_target)
data = None
# Process files -> Merge them into one file and provide a dataframe
process_target = os.path.join(path, self.CSV_FILE_NAME)
if os.path.exists(process_target):
self._logger.info("'%s' already exists. Skipping file merge", process_target)
data = pd.read_csv(process_target)
if data is None:
res = []
for category in self.CATS:
self._logger.info("Merging category '%s'", category)
res += self._process_files(extract_target, category)
data = pd.DataFrame(res, columns=[self.FEATURE_TEXT_LABEL, self.TARGET_LABEL])
data.to_csv(process_target, index=False)
return TextDataset(
data=data,
text_column=self.FEATURE_TEXT_LABEL,
target_column=self.TARGET_LABEL
)
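# A minimal, hedged usage sketch (not part of the library itself): the base
# path below is a hypothetical cache directory, and the download/unzip/merge
# only runs on the first call for that directory.
def _bbc_news_example(base_path: str = '/tmp/bbc-news'):
    dataset = BBCNews(base_path).retrieve()
    # the returned TextDataset is built from a DataFrame with 'text' and 'target' columns
    return dataset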
class Iris(LogMixin):
def __init__(self, base_path: str):
self.base_path = str(base_path)
def retrieve(self) -> ClassificationDataset:
"""Retrieve the iris datasets from ics."""
data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
features = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
target = 'class'
data_path = os.path.join(self.base_path, 'iris.csv')
if not os.path.isfile(data_path):
os.makedirs(self.base_path, exist_ok=True)
download_url(data_url, data_path)
else:
self._logger.info("Data file '%s' already exists. Skipping download...", data_path)
data = pd.read_csv(data_path, header=None, names=features + [target])
return ClassificationDataset(
data=data,
feature_columns=features,
target_column=target
)
class CatsVsDogs(LogMixin):
_EXTRACTED_FOLDER_NAME = 'PetImages'
_VECTOR_FILE = 'vector.pkl'
_IMAGE_SIZE_WIDTH = 100
_IMAGE_SIZE_HEIGHT = 100
_FEATURE_LABEL = 'image'
_TARGET_LABEL = 'target'
def __init__(self, base_path: str):
self.base_path = str(base_path)
def retrieve(self) -> ClassificationDataset:
"""Retrieve the cats and dogs dataset from microsoft."""
data_url = 'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/' \
'kagglecatsanddogs_3367a.zip'
zip_file = os.path.join(self.base_path, 'images.zip')
if not os.path.isfile(zip_file):
os.makedirs(self.base_path, exist_ok=True)
download_url(data_url, zip_file)
else:
self._logger.info("Data file '%s' already exists. Skipping download...", zip_file)
# Unzip file
extract_target = os.path.join(self.base_path, self._EXTRACTED_FOLDER_NAME)
if not os.path.exists(extract_target):
self._logger.info("Unzipping '%s' to '%s'", zip_file, self.base_path)
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
zip_ref.extractall(self.base_path)
else:
self._logger.info("'%s' already exists. Skipping unzip...", extract_target)
vector_file = os.path.join(self.base_path, self._VECTOR_FILE)
if not os.path.exists(vector_file):
self._logger.info("Creating vector from '%s'", extract_target)
dataset_root = extract_target
categories = ['Dog', 'Cat']
data = []
for animal_type in categories:
path = os.path.join(dataset_root, animal_type)
class_num = categories.index(animal_type)
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
resized = cv2.resize(img_array, (self._IMAGE_SIZE_WIDTH, self._IMAGE_SIZE_HEIGHT))
data.append((resized, class_num))
except Exception: # pylint: disable=broad-except
pass
self._logger.info("Storing result in '%s'", vector_file)
random.shuffle(data)
with open(vector_file, 'wb') as fp:
pickle.dump(data, fp)
else:
self._logger.info("Loading vector from '%s'", vector_file)
with open(vector_file, 'rb') as fp:
data = pickle.load(fp)
df = | pd.DataFrame(data, columns=[self._FEATURE_LABEL, self._TARGET_LABEL]) | pandas.DataFrame |
"""Road network risks and adaptation maps
"""
import os
import sys
from collections import OrderedDict
import ast
import numpy as np
import geopandas as gpd
import pandas as pd
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
from shapely.geometry import LineString
from vtra.utils import *
def main():
config = load_config()
hazard_cols = ['hazard_type','climate_scenario','year']
duration = 10
hazard_set = [
{
'hazard': 'landslide',
'name': 'Landslide'
},
{
'hazard': 'flashflood',
'name':'Flashflood'
},
{
'hazard': 'flooding',
'name': 'Fluvial flooding'
},
{
'hazard': 'typhoon flooding',
'name': 'Typhoon flooding'
}
]
change_colors = ['#1a9850','#66bd63','#a6d96a','#d9ef8b','#fee08b','#fdae61','#f46d43','#d73027','#969696']
change_labels = ['< -40','-40 to -20','-20 to -10','-10 to 0','0 to 10','10 to 20','20 to 40',' > 40','No change/value']
change_ranges = [(-1e10,-40),(-40,-20),(-20,-10),(-10,0),(0.001,10),(10,20),(20,40),(40,1e10)]
eael_set = [
{
'column': 'min_eael',
'title': 'Min EAEL',
'legend_label': "Expected Annual losses (million USD)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_eael',
'title': 'Max EAEL',
'legend_label': "Expected Annual losses (million USD)",
'divisor': 1000000,
'significance': 0
}
]
adapt_set = [
{
'column': 'min_eael',
'title': 'Min EAEL',
'legend_label': "Expected Annual losses (million USD)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_eael',
'title': 'Max EAEL',
'legend_label': "Expected Annual losses (million USD)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_ini_adap_cost',
'title': 'Min Initial Investment',
'legend_label': "Initial investment (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_ini_adap_cost',
'title': 'Max Initial Investment',
'legend_label': "Initial investment (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_benefit',
'title': 'Min Benefit over time',
'legend_label': "Benefit (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_benefit',
'title': 'Max Benefit over time',
'legend_label': "Benefit (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_tot_adap_cost',
'title': 'Min Investment over time',
'legend_label': "Total Investment (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_tot_adap_cost',
'title': 'Max Investment over time',
'legend_label': "Total Investment (USD million)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_ini_adap_cost_perkm',
'title': 'Min Initial Investment per km',
'legend_label': "Initial Investment (USD million/km)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_ini_adap_cost_perkm',
'title': 'Max Initial Investment per km',
'legend_label': "Initial Investment (USD million/km)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_tot_adap_cost_perkm',
'title': 'Min Investment per km over time',
'legend_label': "Total Investment (USD million/km)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'max_tot_adap_cost_perkm',
'title': 'Max Investment per km over time',
'legend_label': "Total Investment (USD million/km)",
'divisor': 1000000,
'significance': 0
},
{
'column': 'min_bc_ratio',
'title': 'Min BCR of adaptation over time',
'legend_label': "BCR",
'divisor': 1,
'significance': 0
},
{
'column': 'max_bc_ratio',
'title': 'Max BCR of adaptation over time',
'legend_label': "BCR",
'divisor': 1,
'significance': 0
}
]
adapt_cols = ['min_benefit','min_ini_adap_cost','min_ini_adap_cost_perkm','min_tot_adap_cost','min_tot_adap_cost_perkm','min_bc_ratio',\
'max_benefit','max_ini_adap_cost','max_ini_adap_cost_perkm','max_tot_adap_cost','max_tot_adap_cost_perkm','max_bc_ratio']
region_file_path = os.path.join(config['paths']['data'], 'post_processed_networks',
'road_edges.shp')
flow_file_path = os.path.join(config['paths']['output'], 'failure_results','minmax_combined_scenarios',
'single_edge_failures_minmax_national_road_100_percent_disrupt.csv')
region_file = gpd.read_file(region_file_path,encoding='utf-8')
flow_file = | pd.read_csv(flow_file_path) | pandas.read_csv |
# vim: fdm=indent
'''
author: <NAME>
date: 01/11/17
content: Try to see where in the sorting plots the successful and failed
cells fall for different colon cell types (after RNA-Seq annotation).
'''
# Modules
import os
import sys
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
os.environ['SINGLET_CONFIG_FILENAME'] = 'singlet.yml'
sys.path.append('/home/fabio/university/postdoc/singlet')
from singlet.dataset import Dataset
from singlet.counts_table import CountsTable
from singlet.samplesheet import SampleSheet
# Globals
config = {
'colon': {
'dead stain': 'CD45-DAPI: Pacific Blue-A',
'plots': [{
'x': 'CD44: APC-A',
'y': 'CD66a: PE-A',
}, {
'x': 'CD44: APC-A',
'y': '',
}],
'xlim': {'CD44: APC-A': (2e2, 5e4)},
'ylim': {'CD66a: PE-A': (7e2, 5e5)},
},
'trachea': {
'dead stain': 'dead: Sytox blue-A',
},
'pancreas': {
'dead stain': 'dead: APC-Cy7-A',
'legend': {
'fontsize': 8,
},
},
'heart': {
'dead stain': 'PI-A',
'antibodies': {
'PI-A': 'dead: PI-A',
},
'legend': {
'ncol': 2,
'fontsize': 8,
},
'xlim': {'FSC-A': (1, 8e5)},
'ylim': {'SSC-A': (5e3, 1e6)},
},
'aorta': {
'dead stain': 'PI-A',
'antibodies': {
'PI-A': 'dead: PI-A',
},
},
'spleen': {
'annotation glob': 'Spleen',
'dead stain': 'PI-A',
'antibodies': {
'PI-A': 'dead: PI-A',
},
'xlim': {'FSC-A': (1, 9.5e5)},
'ylim': {'SSC-A': (5e3, 1e6)},
'legend': {
'ncol': 2,
'fontsize': 8,
},
},
'tongue': {
'dead stain': 'Brilliant Violet 421-A',
'plots': [{
'x': 'FITC-A',
'y': 'APC-A',
}],
'antibodies': {
'Brilliant Violet 421-A': 'dead: Brilliant Violet 421-A',
# FIXME: ask them this thing!
'FITC-A': '???: FITC-A',
'APC-A': '???: APC-A',
},
'xlim': {'FITC-A': (3e2, 1e5)},
'ylim': {'APC-A': (1e2, 1e5)},
'legend': {
'ncol': 2,
'fontsize': 8,
},
},
'bladder': {
'annotation glob': 'Bladder',
'dead stain': 'Brilliant Violet 421-A',
'plots': [{
'x': 'FITC-A',
'y': 'APC-A',
}],
'antibodies': {
'Brilliant Violet 421-A': 'dead: Brilliant Violet 421-A',
# FIXME: ask them this thing!
'FITC-A': '???: FITC-A',
'APC-A': '???: APC-A',
},
'legend': {
'ncol': 2,
},
},
'brain_neuron': {
'annotation glob': 'BrainNeuron',
'dead stain': 'Lineage: Brilliant Violet 421-A',
},
'brain_microglia': {
'annotation glob': 'brainMicroglia',
'dead stain': 'Live/dead: PI-A',
'plots': [{
'x': 'CD11b: Brilliant Violet 421-A',
'y': 'CD45: PE-Cy7-A',
}],
},
'kidney': {
'dead stain': 'dead: PI-A',
'legend': {
'ncol': 2,
'fontsize': 8,
},
},
'skin': {
'dead stain': 'Pacific Blue-A',
'plots': [{
'x': 'FITC-A',
'y': 'APC-A',
}],
'antibodies': {
'Pacific Blue-A': 'dead: Pacific Blue-A',
'FITC-A': 'a6 Integrin: FITC-A',
'APC-A': 'CD34: Alexa647-A',
},
'ylim': {'APC-A': (1e1, 7e3)},
},
'fat': {
'dead stain': 'Pacific Blue-A',
'legend': {
'ncol': 2,
'fontsize': 8,
},
'plots': [{
'x': 'FITC-A',
'y': 'PE-Cy7-A',
}, {
'x': 'APC-A',
'y': 'FSC-A',
}],
'antibodies': {
'Pacific Blue-A': 'dead: Pacific Blue-A',
'FITC-A': 'CD31: FITC-A',
'APC-A': 'SCA-1: APC-A',
'PE-Cy7-A': 'CD45: PE-Cy7-A',
},
},
'liver': {
},
'lung': {
},
# Muscle is all ARIA sorting at the VA
'diaphragm': {
'sorter': 'ARIA',
'dead stain': None, # ??
'antibodies': {
'Pacific Blue-A': 'SCA-1: Pacific Blue-A',
'FITC-A': 'CD31: FITC-A',
'APC-A': 'CD45: APC-A',
'PE-Cy7-A': 'VCAM: PE-Cy7-A',
},
'xlim': {'FSC-A': (1e2, 2.7e5)},
'ylim': {'SSC-A': (3e3, 3e5)},
'legend': {
'ncol': 2,
'fontsize': 8,
},
},
'muscle': {
'sorter': 'ARIA',
'dead stain': None, # ??
'antibodies': {
'Pacific Blue-A': 'Ly-6A/E: Pacific Blue-A',
'FITC-A': 'CD45: FITC-A',
'APC-A': 'CD31: APC-A',
'PE-Cy7-A': 'CD106: PE-Cy7-A',
},
'xlim': {'FSC-A': (1e2, 2.67e5)},
'ylim': {'SSC-A': (3e3, 3e5)},
'legend': {
'ncol': 2,
'fontsize': 8,
},
},
}
# Functions
def parse_facs_plate(tissue, plate):
sorter = config[tissue].get('sorter', 'Sony')
if sorter == 'Sony':
return parse_facs_plate_sony(plate)
else:
return parse_facs_plate_aria(plate)
def parse_facs_plate_aria(plate):
import glob
import fcsparser
out = {}
fdn = '../../data/MACAFACS/index_fcs_*/'
glob_fcs = fdn+plate+'.fcs'
fn_fcs = glob.glob(glob_fcs)
if len(fn_fcs) == 0:
raise IOError('FCS file not found')
if len(fn_fcs) > 1:
raise IOError('Multiple FCS files found')
fn_fcs = fn_fcs[0]
glob_index = fdn+plate+'_Index.fcs'
fn_index = glob.glob(glob_index)
if len(fn_index) == 0:
raise IOError('Index file not found')
if len(fn_index) > 1:
raise IOError('Multiple index files found')
fn_index = fn_index[0]
meta, data = fcsparser.parse(fn_fcs, reformat_meta=True)
out['fcs_meta'] = meta
out['fcs_data'] = data
meta_index, data_index = fcsparser.parse(
fn_index,
meta_data_only=False,
reformat_meta=True)
    # Figure out in which wells the cells got sorted (the Aria's FCS metadata is a mess)
data_index['Index'] = 'A0'
i = 1
slstring = ''
while 'INDEX SORTING LOCATIONS_'+str(i) in meta_index:
slstring += meta_index['INDEX SORTING LOCATIONS_'+str(i)]
i += 1
itot = 0
for sl in slstring.rstrip(';').split(';'):
row, col = tuple(map(int, sl.split(',')))
a24 = chr(65 + row)+str(col+1)
data_index.loc[itot, 'Index'] = a24
data_index.loc[itot, 'name'] = plate+'_'+a24
itot += 1
data_index.set_index('name', inplace=True)
out['index_data'] = data_index
return out
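# Hedged illustration of the well-naming convention decoded above from the
# Aria's 'INDEX SORTING LOCATIONS' metadata: zero-based (row, col) indices map
# to a letter plus a 1-based column number.
def _aria_well_name(row, col):
    return chr(65 + row) + str(col + 1)
# e.g. _aria_well_name(0, 0) == 'A1' and _aria_well_name(7, 11) == 'H12'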
def parse_facs_plate_sony(plate):
import glob
import fcsparser
out = {}
fdn = '../../data/MACAFACS/index_fcs_*/'
glob_fcs = fdn+'*'+plate+'*.fcs'
fn_fcs = glob.glob(glob_fcs)
if len(fn_fcs) == 0:
raise IOError('FCS file not found')
if len(fn_fcs) > 1:
raise IOError('Multiple FCS files found')
fn_fcs = fn_fcs[0]
glob_index = fdn+'*'+plate+'*_Index.csv'
fn_index = glob.glob(glob_index)
if len(fn_index) == 0:
raise IOError('Index file not found')
if len(fn_index) > 1:
raise IOError('Multiple index files found')
fn_index = fn_index[0]
meta, data = fcsparser.parse(fn_fcs, reformat_meta=True)
out['fcs_meta'] = meta
out['fcs_data'] = data
data_index = | pd.read_csv(fn_index, sep=',', index_col='Index') | pandas.read_csv |
# -*- coding: utf-8 -*-
import logging
import os
from collections import Counter
from multiprocessing.dummy import Pool as ThreadPool
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from src.globalVariable import GlobalVariable
pd.options.display.float_format = '{0:.3}'.format
class PreferenceAnalytics:
def __init__(self, users_preferences_df, song_df):
self.__logger = logging.getLogger(__name__)
self.__users_preferences_df = users_preferences_df
self.__songs_relevance_df = pd.DataFrame()
self.__users_relevance_df = pd.DataFrame()
self.__song_df = song_df
self.__songs_std_value = 0.0
self.__songs_max_value = 0.0
self.__songs_min_value = 0.0
self.__users_std_value = 0.0
self.__users_max_value = 0.0
self.__users_min_value = 0.0
self.__songs_mean_value = 0.0
self.__users_mean_value = 0.0
self.__songs_median_value = 0.0
self.__users_median_value = 0.0
self.__path_to_save_graphics = 'results/'
if not os.path.exists(self.__path_to_save_graphics):
os.makedirs(self.__path_to_save_graphics)
# Users Methods
def _user_calc(self, users_df):
for index, row in users_df.iterrows():
users_df.at[index, 'global_relevance'] = True if row['total_liked'] >= self.__users_std_value else False
return users_df
def users_make_global_relevance(self, users_count_df):
self.__logger.info("__ Begin: users_make_global_relevance")
pool = ThreadPool(GlobalVariable.processor_number)
users_relevance_df = pool.map(self._user_calc, np.array_split(users_count_df, GlobalVariable.processor_number))
pool.close()
pool.join()
self.__logger.info("__ End: users_make_global_relevance")
return | pd.concat(users_relevance_df, sort=False) | pandas.concat |
import pandas
# A DataFrame is an object that holds data; it is also called a data structure.
df1=pandas.DataFrame([[2,4,6],[10,20,30]])
print(df1)
print("\n")
#Adding column names to data frame
df1=pandas.DataFrame([[2,4,6],[10,20,30]],columns=["Price","Age","Value"])
print(df1)
print("\n")
#Adding index name to data frame
df1= | pandas.DataFrame([[2,4,6],[10,20,30]], columns=["Price","Age","Value"], index=["First","Second"])
print(df1) | pandas.DataFrame |
import urllib
import pytest
import pandas as pd
from pandas import testing as pdt
from anonympy import __version__
from anonympy.pandas import dfAnonymizer
from anonympy.pandas.utils_pandas import load_dataset
@pytest.fixture(scope="module")
def anonym_small():
df = load_dataset('small')
anonym = dfAnonymizer(df)
return anonym
@pytest.fixture(scope="module")
def anonym_big():
try:
df = load_dataset('big')
anonym = dfAnonymizer(df)
except urllib.error.HTTPError:
anonym = None
return anonym
def test_anonym_obj(anonym_small, anonym_big):
assert isinstance(anonym_small, dfAnonymizer), "should have\
returned `dfAnonymizer` object"
if anonym_big is None:
assert False, "Failed to fetch the DataFrame"
assert isinstance(anonym_big, dfAnonymizer), "should have returned\
`dfAnonymizer` object"
def test_numeric_noise(anonym_small):
output = anonym_small.numeric_noise('age', seed=42, inplace=False)
expected = pd.Series([38, 47], dtype='int64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_noise(['age', 'salary'],
seed=42,
inplace=False)
expected = pd.DataFrame({'age': [38, 47],
'salary': [59239.79912097112, 49323.30756879504]})
pdt.assert_frame_equal(expected, output)
def test_numeric_binning(anonym_small):
output = anonym_small.numeric_binning('salary', bins=2, inplace=False)
dtype = pd.CategoricalDtype([
pd.Interval(49315.0, 54279.0, closed='right'),
pd.Interval(54279.0, 59234.0, closed='right')],
ordered=True)
expected = pd.Series([
pd.Interval(54279.0, 59234.0, closed='right'),
pd.Interval(49315.0, 54279.0, closed='right')],
dtype=dtype)
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_binning(['age', 'salary'],
bins=2,
inplace=False)
dtype2 = pd.CategoricalDtype([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
ordered=True)
ser2 = pd.Series([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
dtype=dtype2)
expected = pd.DataFrame({'age': ser2, 'salary': expected})
pdt.assert_frame_equal(expected, output)
def test_numeric_masking(anonym_small):
output = anonym_small.numeric_masking('age', inplace=False)
expected = pd.Series([7.5, -7.5], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_masking(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': [-4954.900676201789, 4954.900676201798],
'salary': [5.840670901327418e-15,
5.840670901327409e-15]})
pdt.assert_frame_equal(expected, output)
def test_numeric_rounding(anonym_small):
output = anonym_small.numeric_rounding('salary', inplace=False)
expected = pd.Series([60000.0, 50000.0], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_rounding(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': {0: 30, 1: 50}, 'salary': {0: 60000.0,
1: 50000.0}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_fake(anonym_small):
output = anonym_small.categorical_fake('name',
locale=['en_US'],
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake(['name', 'email'],
locale=['en_GB'],
seed=42,
inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'}})
pdt.assert_frame_equal(expected, output)
output = anonym_small.categorical_fake({'name': 'name_female'},
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake({'ssn': 'ssn', 'web': 'url'},
seed=42,
inplace=False)
expected = pd.DataFrame({'ssn': {0: '655-15-0410', 1: '760-36-4013'},
'web': {0: 'http://www.hill.net/',
1: 'http://johnson.com/'}})
pdt.assert_frame_equal(expected, output)
def test_categorical_fake_auto(anonym_small):
output = anonym_small.categorical_fake_auto(seed=42, inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'},
'ssn': {0: '655-15-0410', 1: '760-36-4013'}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_resampling(anonym_small):
output = anonym_small.categorical_resampling('name',
inplace=False,
seed=42)
expected = pd.Series(['Bruce', 'Tony'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_resampling(['web', 'ssn'],
seed=2,
inplace=False)
expected = pd.DataFrame({'web':
{0: 'http://www.alandrosenburgcpapc.co.uk',
1: 'http://www.alandrosenburgcpapc.co.uk'},
'ssn': {0: '656564664', 1: '343554334'}})
pdt.assert_frame_equal(expected, output)
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_tokenization(anonym_small):
output = anonym_small.categorical_tokenization('name',
key='test',
inplace=False)
expected = pd.Series(['45fe1a783c', 'bda8a41313'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_tokenization(['web', 'ssn'],
key='test',
inplace=False)
expected = pd.DataFrame({'web': {0: 'e667d84f37', 1: '986a819ea2'},
'ssn': {0: '0f7c17cc6f', 1: 'f42ad34907'}})
pdt.assert_frame_equal(expected, output)
def test_categorical_email_masking(anonym_small):
output = anonym_small.categorical_email_masking('email', inplace=False)
expected = pd.Series(['<EMAIL>', '<EMAIL>'])
| pdt.assert_series_equal(expected, output, check_names=False) | pandas.testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 16:10:09 2019
@author: andreypoletaev
"""
import numpy as np
import pandas as pd
import freud
from scipy.spatial import Voronoi
from matplotlib import pyplot as plt
import matplotlib as mpl
from colorsys import rgb_to_hls, hls_to_rgb
from scipy.signal import butter, filtfilt
from scipy.optimize import root_scalar
from scipy.optimize import curve_fit as cf
from scipy.special import erf, gamma
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter1d
from timeit import default_timer as timer
from datetime import datetime as dt
import crystal_utils as cu
import networkx as nx
from itertools import chain, repeat
from os import path
from glob import glob
from re import split
from batlow import cm_data as batlow_cm
kb = 8.617e-05 ## [eV/Kelvin]
# =============================================================================
# %% cosmetic things
# =============================================================================
## colors for mobile ions
metal_colors = {'Na':'tab:orange', 'K':'#7d02d4', 'Ag':'tab:red',
'Li':'tab:green'}
## perceptually uniform color map, Crameri, F. Scientific Colour Maps, http://www.fabiocrameri.ch/colourmaps (2020).
## (and the Nat Comms paper, doi.org/10.1038/s41467-020-19160-7 )
# batlow_map = LinearSegmentedColormap.from_list('batlow', batlow_cm)
batlow_cdict = {'red' : np.array(batlow_cm)[:,0],
'green' : np.array(batlow_cm)[:,1],
'blue' : np.array(batlow_cm)[:,2]}
batlow_hls = [rgb_to_hls(*i) for i in batlow_cm]
def batlow_lightness_scaled(min_l=batlow_hls[0][1], max_l=batlow_hls[-1][1]):
''' Adjusts lightness on the otherwise perceptually uniform colormap.
Returns in rgb format. '''
linear_lightnesses = np.linspace(batlow_hls[0][1], batlow_hls[-1][1], 256)
nonlinearity = [ i[1]-j for i, j in zip(batlow_hls, linear_lightnesses)]
scaling = abs(max_l - min_l) / abs(batlow_hls[-1][1] - batlow_hls[0][1])
new_linear_lightnesses = np.linspace(min_l, max_l, 256)
new_lightnesses = [scaling*n + nll for nll, n in zip(new_linear_lightnesses, nonlinearity)]
return [hls_to_rgb(b[0], l, b[2]) for b,l in zip(batlow_hls, new_lightnesses)]
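## Hedged example (matplotlib.colors is imported locally because it is not used
## elsewhere in this module): wrap the rescaled-lightness color list into a
## colormap object; the lightness limits below are arbitrary.
def _batlow_lightness_cmap_example(min_l=0.2, max_l=0.9):
    from matplotlib.colors import LinearSegmentedColormap
    return LinearSegmentedColormap.from_list('batlow_light',
                                             batlow_lightness_scaled(min_l, max_l))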
## dictionary of colors to make a LinearSegmentedColormap
## that is like coolwarm but with a darker middle
cdict = {'blue':[[0., 1., 1.], [0.5,0.6,0.6], [1., 0., 0.]],
'green':[[0., 0., 0.],[0.5,0.6,0.6], [1., 0., 0.]],
'red':[[0., 0., 0.], [0.5,0.6,0.6], [1., 1., 1.]] }
zs = ['z_all', '012', '037', '062', '087']
single_zs = ['012', '037', '062', '087']
dims = {'x':0, 'y':1, 'z':3, 'dx':0, 'dy':1, 'dz':3}
# =============================================================================
# %% a list flattening function for lists of strings (filenames)
# ## flatten returns an iterator (usually sufficient),
# ## flattened makes it into a proper list
# =============================================================================
flatten = lambda l: chain.from_iterable(repeat(x,1) if isinstance(x,str) else x for x in l)
def flattened(nested_list_input):
flat_list_output = list()
for x in flatten(nested_list_input) : flat_list_output.append(x)
return flat_list_output
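## Hedged example (file names are made up): strings pass through whole, while
## nested lists of names are spliced in place.
def _flatten_example():
    nested = ['run1.csv', ['run2.csv', 'run3.csv'], 'run4.csv']
    return flattened(nested)  ## ['run1.csv', 'run2.csv', 'run3.csv', 'run4.csv']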
# =============================================================================
# %% aggregation function for edges of graphs
# ## this is used in plots of correlations, the col is typically 'dt'
# =============================================================================
agg_edges_time = lambda df, col: df.groupby(df[col].apply(lambda x: round(x, 2))).count()
# =============================================================================
# %% running-average function for smoothing curves - especially RDF
# =============================================================================
def running_mean(x,N) :
cumsum = np.cumsum(np.insert(x,0,0))
return (cumsum[N:] - cumsum[:-N])/N
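## Hedged example: an N-point running mean shortens the input by N-1 samples,
## which matters when aligning a smoothed RDF with its original r axis.
def _running_mean_example():
    x = np.array([1., 2., 3., 4., 5.])
    return running_mean(x, 3)  ## array([2., 3., 4.]), length 5 - 3 + 1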
# =============================================================================
# %% define helper functions
# =============================================================================
def s2n(s):
'''
check if string is a number and convert it to a number if so
'''
try :
return int(s)
except ValueError:
try :
return float(s)
except ValueError:
return False
def rot(angle):
'''rotation matrix'''
return np.array([[np.cos(angle), -np.sin(angle)],[np.sin(angle), np.cos(angle)]])
def in_bounds(pt, xymin, xymax):
''' check if point is between (xy)_min and (xy)_max
2020/06/13 : added left side equality
This is a general method for any number of dimensions '''
return (sum(pt >= xymin) == len(pt)) & (sum(pt <= xymax) == len(pt))
# =============================================================================
# %% visualization function from freud. This method started here:
# ## freud.readthedocs.io/en/v1.2.0/examples/module_intros/Voronoi-Voronoi.html
# =============================================================================
def draw_voronoi(box, points, cells, nlist=None, color_by_sides=False, ax=None,
draw_points=False, draw_box=False, single_poly_color=None,
cell_numbers=None, cell_text=None, color_by_property=None,
draw_nns=False, skip_polys=False, property_clim=[0, 1],
alpha=0.4, cmap='coolwarm', draw_colorbar=False):
''' This method started out here:
freud.readthedocs.io/en/v1.2.0/examples/module_intros/Voronoi-Voronoi.html
AP added simple flags for: draw_pts, draw_box, coloring by a property,
writing cell numbers next to the plotted polygons.
Distinction between 'points' and 'cells': points are ALL the points (centers
of Voronoi polygons) in the tessellation, while cells are the polygons
(polytopes in freud parlance) that are to be visualized. Therefore,
len(cells) must be <= len(points), otherwise an error will be thrown.
Coloring by a property: pass an iterable, color_by_property, with indices
matching the cells to be plotted, and min/max limits on it via property_clim
Cell numbers: pass an iterable, cell_numbers, of numbers that matches the
length of polygons (input points) to be plotted AND the indices of the cells
(among points) that are getting plotted. Custom labels with cell_text.
'''
## AP adds specifying axes in which to make the plot
if ax is None: ax = plt.gca()
# Draw Voronoi cells
patches = [plt.Polygon(cell[:, :2]) for cell in cells]
patch_collection = mpl.collections.PatchCollection(patches, alpha=alpha,
# edgecolors='black', ## AP took out
facecolors=single_poly_color)
if single_poly_color is not None: ## No color map needed
colors = [1 for cell in cells]
bounds = [1, 1]
patch_collection.set_edgecolor('black')
elif color_by_property is None: ## need to make a color map
if color_by_sides:
colors = [len(cell) for cell in cells] ## original said voro.polytopes for the full graph,
## this allows plotting a specific subset of cells
else: ## choose random colors for all polygons
colors = np.random.permutation(np.arange(len(patches)))
patch_collection.set_edgecolor('black')
# cmap = plt.cm.Set1 ## AP took out
cmap = plt.cm.get_cmap('Set1', np.unique(colors).size) ## this essentially ranks all the cells without checking absolute differences
bounds = np.array(range(min(colors), max(colors) + 2))
## more finagling can be done here to normalize color map if need be
elif color_by_property is not None: ## color by property
if type(color_by_property) == pd.core.series.Series:
colors = [color_by_property.iloc[i] for i, cell in enumerate(cells)] ## normalized below
else :
colors = [color_by_property[i] for i, cell in enumerate(cells)]
bounds = property_clim
cmap = plt.cm.get_cmap(cmap) ## assuming 256 colors resolution
patch_collection.set_array(np.array(colors))
patch_collection.set_cmap(cmap)
patch_collection.set_clim(bounds[0], bounds[-1])
## option to skip the polytopes and only do other stuff
if not skip_polys:
ax.add_collection(patch_collection)
# Draw points
if draw_points:
pt_colors = np.random.permutation(np.arange(len(points))) ## AP
plt.scatter(points[:, 0], points[:, 1], c=pt_colors, s=6) ## AP change to pt_colors
## AP: Write the numbers of polygons, given #cells ≤ #points
## 2020/07/09: comment out the old, add the option for custom text
if cell_numbers is not None:
# for c, cn in zip(cells, cell_numbers):
# ax.text(points[cn, 0], points[cn, 1], cn, fontsize=8)
for i, cn in enumerate(cell_numbers):
text = f'({cn},\n{cell_text[i]})' if cell_text is not None else cn
ax.text(points[cn, 0], points[cn, 1], text, fontsize=8)
plt.xlim((-box.Lx / 2, box.Lx / 2))
plt.ylim((-box.Ly / 2, box.Ly / 2))
## Set equal aspect and draw the box
if draw_box: ## added AP
# ax.set_aspect('equal', 'datalim') ## commented out AP
box_patch = plt.Rectangle([-box.Lx / 2, -box.Ly / 2], box.Lx, box.Ly, alpha=1, fill=None)
ax.add_patch(box_patch)
## Draw nearest-neighbor lines: this is freud 1.2.0 back-compatibility
## For freud 2.2.0, use the below with flag draw_neighbors
if nlist is not None:
bonds = np.asarray([points[j] - points[i] for i, j in zip(nlist.index_i, nlist.index_j)])
box.wrap(bonds)
line_data = np.asarray([[points[nlist.index_i[i]],
points[nlist.index_i[i]] + bonds[i]] for i in range(len(nlist.index_i))])
line_data = line_data[:, :, :2]
line_collection = mpl.collections.LineCollection(line_data, alpha=0.3)
ax.add_collection(line_collection)
## connect nearest neighbors, freud 2.2.0
if draw_nns > 0:
que = freud.locality.AABBQuery(box, points)
query_args = dict(mode='nearest', num_neighbors=draw_nns, exclude_ii=True)
result = list(que.query(points, query_args))
bond_vectors = np.asarray([points[x[1]] - points[x[0]] for x in result])
bond_vectors = box.wrap(bond_vectors)
line_data = [[points[result[i][0]], points[result[i][0]] + bond_vectors[i]] \
for i in range(len(result))]
line_data = np.asarray(line_data)[:, :, :2] ## planarize
line_collection = mpl.collections.LineCollection(line_data, alpha=0.3)
ax.add_collection(line_collection)
# Show colorbar for number of sides
if color_by_sides or draw_colorbar:
cb = plt.colorbar(patch_collection, ax=ax, ticks=bounds, boundaries=bounds)
cb.set_ticks(cb.formatter.locs + 0.5)
cb.set_ticklabels((cb.formatter.locs - 0.5).astype('int'))
cb.set_label("Number of sides", fontsize=12)
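## Minimal, hedged usage sketch of draw_voronoi with random points. Only the
## freud calls already used in make_voronoi below are assumed; the point count,
## box size, and seed are arbitrary.
def _draw_voronoi_example(L=1.0, n_pts=50, seed=0):
    rng = np.random.default_rng(seed)
    pts = np.zeros((n_pts, 3))  ## freud expects 3D points; keep z = 0
    pts[:, :2] = rng.uniform(-0.5 * L, 0.5 * L, size=(n_pts, 2))
    box = freud.box.Box.square(L)
    vor = freud.locality.Voronoi(box, 0.5 * L)
    vor.compute(system=(box, pts))
    fig, ax = plt.subplots()
    draw_voronoi(box, pts, vor.polytopes, ax=ax, draw_points=True, draw_box=True)
    return fig, ax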
# =============================================================================
# %% generating a lattice of repeating points for making the Voronoi grid
# ## Deprecated: the grid is generated from the crystal structure file
# =============================================================================
def gen_grid(pt0, a1, a2, angle=0, angle2=np.pi/6, xymin=np.array([-0.05,-0.1]), xymax=np.array([1.05,1.15])):
''' generate array of points within bounds '''
## initiate list of points
xs = [pt0[0]]
ys = [pt0[1]]
va1 = np.dot(rot(angle), np.array([a1,0]))
va2 = np.dot(np.dot(rot(angle),rot(angle2)), np.array([a2,0]))
numa1 = (xymax-xymin) / va1
numa2 = (xymax-xymin) / va2
zeroa1 = (pt0-xymin) / va1
zeroa2 = (pt0-xymin) / va2
for a1i in np.round(np.arange(-max(zeroa1)*5,max(numa1-zeroa1)*5)) : ## x direction
for a2i in np.round(np.arange(-max(zeroa2)*5,max(numa2-zeroa2)*5)) : ## y direction
pt = pt0 + a1i * va1 + a2i * va2
if in_bounds(pt,xymin,xymax):
xs.append(pt[0])
ys.append(pt[1])
return xs, ys
# =============================================================================
# %% check neighbors with arbitrary connection indices
# ## this is only relevant for older versions of freud.
# ## DEPRECATED; was used with freud 1.2.0 back in 2019
# =============================================================================
def are_neighbors(cell1s, cell2s, index_i, index_j):
'''
return true if cell2s are nearest-neighbors of cell1s,
using indices index_i and index_j
Note that indices are arbitrary and could be 2-nearest, 1-nearest, or else
'''
assert len(cell2s) == len(cell1s), 'cell1s and cell2s must be same length'
return [cell2s[k] in index_j[np.where(index_i==cell1s[k])] for k in range(len(cell1s)) ]
# =============================================================================
# %% make dictionaries for distances to oxygens and for path lengths
# =============================================================================
def edge_distances_to_oi(folder='.', zs=['z_all', '012', '037', '062', '087'],max_r=4):
''' returns dict of (max_r+1) x (max_r+1) matrices with counts of edges
    aggregated by their distances to Oi sites. max_r must be passed as input;
    it can be calculated from the "old_ox_r" or "new_ox_r" columns.
2020/06/16: saving time, save a set of distances for every folder,
and first try to look it up and load it'''
distances = dict()
for zz in zs:
try: distances[zz] = np.loadtxt(folder+'/edge dists {}.csv'.format(zz)).astype(int)
except:
## calculate from paths and Oi distances
try:
dists = np.zeros((max_r+1,max_r+1))
oi_sites = np.loadtxt(folder+'/oxygen_cells_{}.csv'.format(zz)).astype(int)
paths = np.loadtxt(folder+'/paths{}.csv'.format(zz),delimiter=',').astype(int)
# print('loaded site-to-site paths')
for edge in np.argwhere(paths==1): ## count all 1-NN paths
dists[min(paths[oi_sites,edge[0]]),min(paths[oi_sites,edge[1]])] += 0.5 if edge[0] != edge[1] else 1
# print('processed all edges')
## remove zeros as this will be divided by
for i in range(len(dists)):
for j in range(len(dists[0])):
if dists[i,j] == 0: dists[i,j] = 1
# print('removed zeros')
## assign dictionary
distances[zz] = dists
## save
np.savetxt(folder+'/edge dists {}.csv'.format(zz),distances[zz],fmt='%d',delimiter=',')
except: print(f'missing paths or Oi cells at z = {zz}')
return distances
def sites_by_distance(folder='.', zs=['z_all', '012', '037', '062', '087']):
''' returns dict of lists w/indices of sites by their network distance
from the nearest Oi, from within the specific folder.
This is usually where sites_by_r[plane][radius] calls come from '''
sites = dict()
for zz in zs:
try:
oi_sites = np.loadtxt(folder+'/oxygen_cells_{}.csv'.format(zz)).astype(int)
paths = np.loadtxt(folder+'/paths{}.csv'.format(zz),delimiter=',').astype(int)
path_lengths = sorted(set(flatten(paths)))
_ = path_lengths.pop() ## take out longest
sites[zz] = [[] for p in range(max(path_lengths)+2)] ## could in principle throw an error
for s in range(len(paths)) : ## each path is a square matrix
sites[zz][min(paths[oi_sites,s])].append(s)
for r in range(len(sites[zz])): ## prune each from end
if not sites[zz][-1] : sites[zz] = sites[zz][:-1]
except: print(f'something missing for z = {zz}')
return sites
def BR_sites(folder='.', zs=['z_all', '012', '037', '062', '087']) :
''' retrieves (from pre-saved files) the 1D arrays with network indices of
Beevers-Ross sites for beta-aluminas. All this is only
in reference to a pre-defined "grid" that defines the network.
AP moved from hop_utils 2020/06/23 '''
BR_dict = dict()
for zz in zs:
try:
BR_dict[zz] = np.loadtxt(folder+'/sites BR {}.csv'.format(zz),delimiter=',')
        except: print(f'missing BR sites at z = {zz} in folder {folder}')
return BR_dict
def site_paths(folder = '.', zs=['z_all', '012', '037', '062', '087']):
''' retrieves number_of_polygons x number_of_polygons arrays with network path lengths
between mobile-ion sites from pre-saved files. All this is only in
reference to a pre-defined "grid" that defines the network.
AP moved from hop_utils 2020/06/23 '''
paths = dict()
for zz in zs:
try:
paths[zz] = np.loadtxt(folder+'/paths{}.csv'.format(zz),delimiter=',').astype(int)
except: print(f'missing paths at z = {zz}')
return paths
def o_sites(folder = '.', zs=['z_all', '012', '037', '062', '087']):
''' retrieves (from pre-saved files) the 1D arrays with network indices of sites
that have an edge that is occupied by an O_interstitial. All this is only
in reference to a pre-defined "grid" that defines the network.
AP moved from hop_utils 2020/06/23 '''
paths = dict()
for zz in zs:
try:
paths[zz] = np.loadtxt(folder+'/oxygen_cells_{}.csv'.format(zz),delimiter=',').astype(int)
except: print(f'missing O sites z = {zz}')
return paths
# =============================================================================
# %% load a list of hops for a plane nicely
# ## update for adding reverse column if it is not already there
# ## (it was recorded by ion and in simulation-chronological order)
# ## 2020/04/07: adding oxygen path input as 'old_ox_r', 'new_ox_r'
# ## 2020/06/13: adding the do_update flag
# =============================================================================
def load_plane(path, numpolys=200, numplanes=1, verbose=True, do_update=True,
oxygen_path=None, paths_path=None, BR=[]):
total_hops = pd.read_csv(path); save_update = False
zz = which_one_in(zs,path)
if 'new_mg_r' in total_hops.columns:
do_update=False
save_update=False
## check reverse column and update if needed
if do_update and 'rev_hop' not in total_hops.columns:
print('\nupdating reverse hops column...')
total_hops = total_hops.assign(rev_hop = np.zeros(len(total_hops)))
num_ions = len(total_hops.ion.unique())
for ii, ion in enumerate(total_hops.ion.unique()):
one_ion_hops = total_hops.loc[total_hops.ion == ion]
## account for non-hops
if len(one_ion_hops) > 1:
rev = list((one_ion_hops.new_cell[1:].values == one_ion_hops.old_cell[:-1].values))
rev.append(np.nan) ## last hop is unknown
else : rev = [np.nan]
total_hops.rev_hop.loc[total_hops.ion == ion] = rev
if ii % 25 == 0: print(f'{ii} of {num_ions} ions done')
save_update = True; print('... done and onwards.')
elif verbose: print('\nupdate complete: reverse hopping.')
## check residence time of previous hop for correlations
if do_update and 'old_resid_time' not in total_hops.columns :
print('updating old residence time column...')
total_hops = total_hops.assign(old_resid_time = np.zeros(len(total_hops)))
num_ions = len(total_hops.ion.unique())
for ii, ion in enumerate(total_hops.ion.unique()):
one_ion_hops = total_hops.loc[total_hops.ion == ion]
## account for non-hops
if len(one_ion_hops) > 1:
old = list(one_ion_hops.new_resid_time[:-1])
old.insert(0,one_ion_hops.time.iloc[0]) ## 1st hop unknown - and that is OK
else : old = [np.nan]
total_hops.old_resid_time.loc[total_hops.ion == ion] = old
if ii % 25 == 0: print(f'{ii} of {num_ions} ions done')
save_update = True; print('... done and onwards.')
elif verbose: print('update complete: old residence time.')
## add columns on the distance to nearest oxygen to all planes
while do_update and ('old_ox_r' not in total_hops.columns or 'new_ox_r' not in total_hops.columns) :
if oxygen_path is None or paths_path is None:
print('distances to Oi missing; add oxygen_path=... and paths_path=... to update')
break ## out of the while loop
elif oxygen_path == 'no interstitials' :
total_hops['new_ox_r'] = 100
total_hops['old_ox_r'] = 100
break
print('updating distances to oxygens...')
oi_sites = np.loadtxt(oxygen_path).astype(int)
paths = np.loadtxt(paths_path,delimiter=',').astype(int)
## add columns
total_hops['new_ox_r'] = total_hops.new_cell.apply(lambda x: min(paths[oi_sites,x]))
total_hops['old_ox_r'] = total_hops.old_cell.apply(lambda x: min(paths[oi_sites,x]))
## save & update
save_update = True; print('... done and onwards.')
if 'old_ox_r' in total_hops.columns and 'new_ox_r' in total_hops.columns and verbose :
if not save_update: print('update complete: distances to oxygens.')
## add the BR column here - if beta
if 'old_is_BR' not in total_hops.columns and do_update :
print('updating BR site column...')
total_hops['old_is_BR'] = total_hops.old_cell.isin(BR[zz])
## save & update
save_update = True; print('... done and onwards.')
elif verbose: print('update complete: BR sites (hop origin).')
if save_update: ## save all updates at once
print('saving updated hops...')
total_hops.to_csv(path,index=False)
## proceed to actually load stuff
all_residences = total_hops.groupby('new_cell').mean().new_resid_time
# new_cells = total_hops.new_cell.unique()
# old_cells = total_hops.old_cell.unique()
non_hops = total_hops.loc[total_hops.new_cell == total_hops.old_cell]
# empties = set(range(numpolys*numplanes))-set(new_cells)-set(old_cells)
empties = set(range(numpolys*numplanes))-set(list(all_residences.index))
non_hop_sites = list(non_hops.new_cell.unique())
## take out the placeholders for the ions that do not hop
## those were recorded as one hop with new_cell == old_cell
total_hops = total_hops.loc[total_hops.new_cell != total_hops.old_cell]
if verbose:
print('\n{} ions hopped {} times, {} ions stayed put'.format(
len(total_hops.ion.unique()),len(total_hops), len(non_hops)))
if 'old_is_BR' in total_hops.columns:
print('{} hops from BR sites, {} from aBR sites'.format(
len(total_hops.query('old_is_BR == True')),
len(total_hops.query('old_is_BR == False'))))
print('{} total onwards hops, {:.1f}% hops reversed'.format(
total_hops.rev_hop.loc[total_hops.rev_hop==False].size,
total_hops.rev_hop.loc[np.isnan(total_hops.rev_hop)==False].mean()*100.))
print(f'{numpolys*numplanes} sites, {len(empties)} remained empty')
else :
print('...plane loaded')
return total_hops, all_residences, non_hops, empties, non_hop_sites
# =============================================================================
# %% combine planes after updating
# ## skipping the graphics for a moment
# =============================================================================
def combine_planes3(plane_paths, zs, numpolys=200, verbose=False):
combined_hops = pd.DataFrame()
## sort the inputs by z, while keeping them together
all_inputs = list(zip(plane_paths, zs))
all_inputs = sorted(all_inputs, key = lambda x: x[1])
if verbose: print(f'combining {len(all_inputs)} planes')
for i, (plane, z) in enumerate(all_inputs):
th, _, nh, _, _ = load_plane(plane, numpolys=numpolys, verbose=True)
these_hops = pd.concat([th,nh],ignore_index=True)
these_hops.new_cell += i * numpolys
these_hops.old_cell += i * numpolys
these_sites = set(these_hops.new_cell.unique()).union(set(these_hops.old_cell.unique()))
combined_hops = pd.concat([combined_hops, these_hops],ignore_index=True)
if verbose:
# print(f'ions at z={z:03d} :', these_hops.ion.unique())
print('sites from {} to {}, {} total\n'.format(min(these_sites),max(these_sites),len(these_sites)))
return combined_hops
# =============================================================================
# %% low-pass filter
# =============================================================================
def lopass(signal, cutoff, sampling_freq, order = 5):
nyquist = sampling_freq / 2
b, a = butter(order, cutoff/nyquist)
if not np.all(np.abs(np.roots(a)) < 1):
raise ValueError('Filter with cutoff {} unstable with '
'sampling frequency {}'.format(cutoff, sampling_freq))
filtered = filtfilt(b, a, signal, method='gust')
return filtered
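## Hedged example (numbers are illustrative only): smooth a noisy 2 Hz sine
## sampled at 100 Hz with a 5 Hz low-pass cutoff.
def _lopass_example():
    t = np.linspace(0, 2, 200)
    noisy = np.sin(2 * np.pi * 2 * t) + 0.3 * np.random.randn(len(t))
    return lopass(noisy, cutoff=5, sampling_freq=100, order=5)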
# =============================================================================
# %% functions for traversing lists
# =============================================================================
def which_one_in(l, f):
"""
    returns the single element of list l that is in f; returns False if more
    than one element matches, and None if none do
"""
included = [i for i in l if str(i) in f]
if len(included) == 1:
return included[0]
elif len(included) > 1:
return False
else:
return None
# =============================================================================
# %% simple exponential decay with x0 = 0, and baseline
# =============================================================================
def exp_decay(x, c0, tau, c1=0):
return c0 * np.exp(-x/tau) + c1
def exp_decay_cumsum(x, c0, tau, c1=0, c2=0):
return np.cumsum(c0 * np.exp(-x/tau) + c1)+c2
# =============================================================================
# %% double exponential decay with x0 = 0, and baseline
# =============================================================================
def two_exp_decay(x, c0, c1, tau0, tau1, y0=0):
return y0 + exp_decay(x, c0, tau0) + exp_decay(x, c1, tau1)
def two_exp_decay_cumsum(x, c0, c1, tau0, tau1, y0=0, y1=0):
return np.cumsum(y0 + exp_decay(x, c0, tau0) + exp_decay(x, c1, tau1)) + y1
# =============================================================================
# %% KWW stretched exponential decay with x0 = 0, and baseline
# =============================================================================
def kww_decay(x, c0, tau, beta=1., c1=0):
return c0 * np.exp(-(x/float(tau))**float(beta)) + c1
# =============================================================================
# %% an erf rise wrapper for fitting functions
# =============================================================================
def rising_exp_decay(x, c0, tau, c1=0, x0=0, rt=np.inf):
return exp_decay(x,c0,tau,c1) * erf(rt*(x-x0))
def rising_exp_decay_cumsum(x, c0, tau, c1=0, c2=0, x0=0, rt=np.inf):
return np.cumsum(exp_decay(x,c0,tau,c1) * erf(rt*(x-x0))) +c2
def rising_two_exp_decay(x, c0, c1, tau0, tau1, y0=0, x0=0, rt=np.inf):
return two_exp_decay(x, c0, c1, tau0, tau1, y0) * erf(rt*(x-x0))
def rising_two_exp_decay_cumsum(x, c0, c1, tau0, tau1, y0=0, y1=0, x0=0, rt=np.inf):
return np.cumsum(two_exp_decay(x, c0, c1, tau0, tau1, y0) * erf(rt*(x-x0))) + y1
# =============================================================================
# %% KWW stretched exponential decay with x0 = 0, and baseline, plus:
# ## tail stretch turns on at x=tstar, returns simple exponential for x<tstar
# =============================================================================
def kww_decay_break(x, c0, tau, tstar=0, beta=1., c1=0):
simple = exp_decay(x[x<tstar],c0,tau,c1)
c02 = exp_decay(tstar, c0, tau, 0)/kww_decay(tstar, 1., tau, beta, 0)
stretched = kww_decay(x[x>=tstar], c02, tau, beta, c1)
# print x[x<tstar], simple
# print x[x>=tstar]-tstar, stretched
# return np.array( [(simple[i], stretched[i])[x[i]>tstar] for i in range(len(x))] )
return np.concatenate((simple, stretched), axis=None)
def kww_decay_cumsum(x, c0, tau, tstar=0, beta=1., c1=0, c2=0):
simple = exp_decay(x[x<tstar],c0,tau,c1)
c02 = exp_decay(tstar, c0, tau, 0)/kww_decay(tstar, 1., tau, beta, 0)
stretched = kww_decay(x[x>=tstar], c02, tau, beta, c1)
# print x[x<tstar], simple
# print x[x>=tstar]-tstar, stretched
# return np.array( [(simple[i], stretched[i])[x[i]>tstar] for i in range(len(x))] )
return np.cumsum(np.concatenate((simple, stretched), axis=None))+c2
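## Hedged sketch with synthetic data and made-up parameters: fit a single
## exponential using the curve_fit alias `cf` imported at the top of this module.
def _exp_decay_fit_example():
    t = np.linspace(0, 50, 200)
    y = exp_decay(t, c0=1.0, tau=8.0, c1=0.05) + 0.01 * np.random.randn(len(t))
    popt, pcov = cf(exp_decay, t, y, p0=[1.0, 5.0, 0.0])
    return popt  ## approximately [1.0, 8.0, 0.05]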
# =============================================================================
# %% Mittag-Leffler function and a wrapper to fit to a lifetime with it
# =============================================================================
def mittag_leffler(x,a,b,terms):
'''
Computes the Mittag-Leffler function:
E_a,b(x) = sum_0^terms x**term / gamma(a*term+b)
This is typically called with x = -(t/tau)**a, hence the wrapper below.
Convergence is limited to when the function is not too small and the value
of the argument x is not too large. 100-200 terms typically gives precision
that is good enough for any practical application including fitting. This
translates to a reliable range of values for the ML function from around
1 (argument near zero) to 0.01 at best. If you think you need to compute
the ML function when it is small (e.g. 1e-3), most likely an approximation
will work just as fine.
Sokolov & Klafter in "First Steps in Random Walks" set b=1.
The Havriliak-Negami relaxation's FT is close to the ML function, but not
exact; Cole-Cole relaxation has b=1 - but still other terms in front too.
Parameters
----------
x : array or number
The argument of the function.
a : numerical
Typical first parameter of the Mittag-Leffler function.
b : numerical
Typical first parameter of the Mittag-Leffler function.
terms : int
The number of terms to compute for the series. Exact is infinity.
100-200 is typically sufficient, using a number higher than that may
hamper convergence.
Returns
-------
output
Same dimensionality as x.
'''
return np.sum(np.array([x**k/gamma(k*a+b) for k in range(terms)]), axis=0)
def mittag_leffler_wrapper(x, tau, a, b, terms):
return mittag_leffler(-(x/tau)**a, a, b, terms)
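## Hedged sketch (synthetic data): fit a relaxation curve with the ML function,
## fixing b = 1 and using 150 terms as suggested by the docstring above; the
## starting guess is arbitrary.
def _mittag_leffler_fit_example():
    t = np.linspace(0.01, 20, 100)
    y = mittag_leffler_wrapper(t, tau=5.0, a=0.8, b=1.0, terms=150)
    fit_fun = lambda x, tau, a : mittag_leffler_wrapper(x, tau, a, 1.0, 150)
    popt, pcov = cf(fit_fun, t, y, p0=[2.0, 0.6])
    return popt  ## should recover approximately [5.0, 0.8]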
# =============================================================================
# %% make freud Voronoi & NearestNeighbors objects from a grid of points (x,y)
# ## this method assumes that the box is square
# ## This is copied from crystal_utils on 2020/07/23
# =============================================================================
def make_voronoi(grid_path, L=1., z=0, debug=False):
'''input: grid_path is the (relative) path to the file with grid points'''
pts = pd.read_csv(grid_path)
gridpts = np.array([pts.x, pts.y]).T
print(f'read {len(gridpts)} grid points')
## help_vor is the inverse of unit cells.
## Vertices are maxima of mobile ion probability density
help_vor = Voronoi(gridpts)
site_pts = []
for i in range(len(help_vor.vertices[:, 0])):
if in_bounds(help_vor.vertices[i, :], np.array([0, 0]), np.array([L, L])):
site_pts.append((help_vor.vertices[i, 0] - 0.5 * L, help_vor.vertices[i, 1] - 0.5 * L, 0))
## remove duplicates around the edges of the box if needed,
## this is using brute force
    to_remove = []
    thresh = L * 1e-4
    new_site_pts = []
for i, pt1 in enumerate(site_pts):
if i in to_remove: continue
for j, pt2 in enumerate(site_pts[i + 1:]):
if L - abs(pt1[0] - pt2[0]) < thresh and abs(pt1[1] - pt2[1]) < thresh:
# print pt1, pt2, i, j+i+1
to_remove.append(j + i + 1)
elif L - abs(pt1[1] - pt2[1]) < thresh and abs(pt1[0] - pt2[0]) < thresh:
# print pt1, pt2, i, j+i+1
to_remove.append(j + i + 1)
new_site_pts.append(pt1)
print(f'{len(site_pts)} points in bounds, removing {len(to_remove)} of them')
site_pts = np.asarray(new_site_pts)
box = freud.box.Box.square(L)
site_pts = box.wrap(site_pts)
site_vor = freud.locality.Voronoi(box, 0.5 * L)
site_vor.compute(system=(box, site_pts))
numpolys = len(site_vor.polytopes)
if debug: draw_voronoi(box, site_pts, site_vor.polytopes, draw_points=True,
cell_numbers=range(len(site_vor.polytopes)))
## points at centers of sites, approx. corresponding to oxygen locations
help_verts = site_vor.polytopes
these_sites = []
for s in help_verts:
for sh in s:
            these_sites.append([sh[0], sh[1], 0]) ## append z=0 given that the z coordinate was dropped earlier
help_verts = np.asarray(these_sites)
## initialize nearest-neighbor object
# nn = freud.locality.NearestNeighbors(0.2, 4, 1.05)
return box, site_vor, site_pts, numpolys, help_verts
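# =============================================================================
# %% illustrative usage sketch (added): building the Voronoi objects
# ## 'conduction_plane_grid.csv' is a hypothetical placeholder path; the file
# ## only needs columns x and y as read by make_voronoi() above.
# =============================================================================
def _demo_make_voronoi():
    '''Minimal sketch of setting up the site lattice from a grid file.'''
    box, site_vor, site_pts, numpolys, help_verts = make_voronoi('conduction_plane_grid.csv', L=1.)
    print(f'{numpolys} Voronoi cells constructed for {len(site_pts)} site points')
    return box, site_vor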
# =============================================================================
# %% TODO: plot multiple Voronoi lattices from a multi-plane file with hops
# =============================================================================
def draw_lattices():
return False
# =============================================================================
# %% count and output re-fill times for a single or composite plane
# =============================================================================
def count_fill_times(plane_data, numpolys):
## sort by time for calculating fill times below
plane_data.sort_values(by='time',axis=0,inplace=True,ascending=True)
plane_data.reset_index(inplace=True)
plane_data.drop(['index'],axis=1,inplace=True)
#numsites = len(total_hops.new_cell.unique()) ## use numpolys instead
fill_sites = np.zeros((numpolys,2)) ## keep track of which ion was here last
fill_times = list() ## keep track of which site gets which time to tell BR/a-BR apart
for i, r in plane_data.iterrows():
## count the non-hop with its (long) residence time
if r.old_cell == r.new_cell :
fill_times.append(np.array([r.new_resid_time,int(r.old_cell),np.nan]))
continue
## restart old site's counter. Even if not onwards hop, new ion could fill
## the site before the original returns
fill_sites[int(r.old_cell),:] = np.array([r.time, int(r.ion)])
## append new site's fill time with now minus last hop out
if np.array_equal(fill_sites[int(r.new_cell),:], np.array([0,0])) : ## the first hop into new site
## count the fill time of the initial site starting at zero
fill_times.append(np.array([r.time,int(r.old_cell),np.nan]))
else :
## flag back-hops / re-fills immediately
## 3rd column is True for re-fills
fill_times.append([r.time-fill_sites[int(r.new_cell),0],int(r.new_cell),r.ion==fill_sites[int(r.new_cell),1]])
## more conditions to go here
## TODO: add the fill time from last hop into new site to end of simulation
if (r.rev_hop != 1.) & (r.rev_hop != 0.):
fill_times.append([r.new_resid_time,int(r.new_cell),np.nan])
if not i % int(len(plane_data)/20) : print(f'{100*i/len(plane_data):.0f}% done')
fill_times = pd.DataFrame(data=fill_times, columns=['time','site','refill'])
fill_times.time = np.round(fill_times.time,3)
fill_times.site = fill_times.site.astype(int)
return fill_times
# =============================================================================
# %% calculate occupancy of high-energy site based on T and fixed stoichiometry
# ## assuming a two-state system with each level having Nsite sites
# ## assuming Nion total ions, of which Nexc are excited to the higher level
# =============================================================================
def two_state_occup(Nion, Nsite, energy, T):
''' energy [eV], T [K] '''
lb=max(0,Nion-Nsite)
ub=Nion/2.
f = lambda Nexc : (Nsite-Nexc)*(Nion-Nexc) - np.exp(energy/kb/T)*(Nsite-(Nion-Nexc))*Nexc
sol = root_scalar(f, bracket=[lb,ub], method='brentq')
return sol.root
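# =============================================================================
# %% illustrative usage sketch (added): occupancy of the high-energy level
# ## Hypothetical numbers: 100 ions on 2x100 sites, 0.1 eV splitting, 300 K.
# ## Assumes the module-level Boltzmann constant kb is given in eV/K.
# =============================================================================
def _demo_two_state_occup():
    '''Minimal sketch: how many ions sit in the upper level at 300 K.'''
    n_exc = two_state_occup(Nion=100, Nsite=100, energy=0.1, T=300.)
    print(f'{n_exc:.1f} of 100 ions in the high-energy level at 300 K')
    return n_exc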
# =============================================================================
# %% calculate energy from occupancy
# ## assuming Nion total ions, of which Nexc are excited to the higher level
# ## and 2 levels with Nsite sites each; energy is in eV based on kb
# =============================================================================
def two_state_energy(Nion, Nsite, Nexc, T, lb=0., ub = 1.):
''' bounds [eV], T [K] '''
assert (Nexc <= Nion / 2.) or (lb < 0.), 'N excited > 50%, T > inf @ energy > 0'
assert Nexc > 0., 'N excited should probably be > 0, or else T < 0'
f = lambda energy : (Nsite-Nexc)*(Nion-Nexc) - np.exp(energy/kb/T)*(Nsite-(Nion-Nexc))*Nexc
sol = root_scalar(f, bracket=[lb,ub], method='brentq')
return sol.root
# =============================================================================
# %% calculate occupancy from energy for a 2-state model w/ distinct #s o/sites
# ## (i.e. degeneracies of the levels 'g' for ground and 'e' for excited)
# ## assuming levels with degeneracy Ns_g and Ns_e, and N_i total ions
# =============================================================================
def two_state_nondeg_occupancy(Ns_g, Ns_e, Nion, energy, T):
''' energy [eV], T [K] '''
assert Nion < Ns_g+Ns_e, 'too many ions for {} total sites: {}'.format(Ns_g+Ns_e, Nion)
lb=max(0,Nion-Ns_g) ## minimum number of ions in excited level
    ub=float(Nion)*Ns_e/(Ns_e+Ns_g) ## toward inf T both levels reach equal fractional occupancy, i.e. Nion*Ns_e/(Ns_e+Ns_g) ions excited
f = lambda Nexc : (Nion-Nexc)*(Ns_e - Nexc) - np.exp(energy/kb/T)*Nexc*(Ns_g-(Nion-Nexc))
sol = root_scalar(f, bracket=[lb,ub], method='brentq')
return sol.root
# =============================================================================
# %% calculate energy from occupancy for a 2-state model w/ distinct #s of sites
# ## (i.e. degeneracies of the levels 'g' for ground and 'e' for excited)
# ## assuming levels with degeneracy Ns_g and Ns_e, and N_i total ions
# =============================================================================
def two_state_nondeg_energy(Ns_g, Ns_e, Ni_g, Ni_e, T, lb=0., ub = 5.):
''' bounds [eV], T [K] '''
assert 0 < Ni_g < Ns_g, 'weird ground state: {:.2f} in {}'.format(Ni_g, Ns_g) ## strict <
assert 0 < Ni_e < Ns_e, 'weird excited state: {:.2f} in {}'.format(Ni_e, Ns_e) ## strict
f_g = float(Ni_g) / float(Ns_g) ## fraction of filled ground-state sites
f_e = float(Ni_e) / float(Ns_e) ## fraction of filled excited-state sites
# f = lambda energy : f_g*(1.-f_e)*Ns_g*Ns_e - np.exp(energy/kb/T)*f_e*(1.-f_g)*Ns_g*Ns_e
f = lambda energy : f_g*(1.-f_e) - np.exp(energy/kb/T)*f_e*(1.-f_g)
sol = root_scalar(f, bracket=[lb,ub], method='brentq')
return sol.root
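# =============================================================================
# %% illustrative usage sketch (added): energy from occupancies, non-degenerate
# ## Hypothetical occupancies: 90 of 100 ground-level sites and 10 of 50
# ## excited-level sites filled at 300 K; kb is again assumed to be in eV/K.
# =============================================================================
def _demo_two_state_nondeg_energy():
    '''Minimal sketch: apparent level splitting from site occupancies.'''
    e_ev = two_state_nondeg_energy(Ns_g=100, Ns_e=50, Ni_g=90, Ni_e=10, T=300.)
    print(f'apparent level splitting: {e_ev*1000:.0f} meV')
    return e_ev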
# =============================================================================
# %% multi-method wrapper for calculating expectation values over distributions
# ## assumes that the 'dist' passed on is already a Series, not DataFrame
# =============================================================================
def expectation_multi_method(dist, method, aggregated=False, **kwargs):
if method == 'mean':
return dist.mean(), dist.std()
else:
if not aggregated :
## make pdf & cdf
freqs = dist.groupby(dist).agg('count').pipe(pd.DataFrame).rename(columns = {dist.name: 'frequency'})
freqs['pdf'] = freqs['frequency'] / sum(freqs.frequency)
freqs['cdf'] = freqs.pdf.cumsum()
# print(freqs.head())
## create PDF from a distribution
times = freqs.loc[(freqs.index.values > 1.) & (freqs.index.values < 500)].index.values
pdf = freqs.loc[(freqs.index.values > 1.) & (freqs.index.values < 500)].pdf.values
pdf_ub = freqs.pdf.min()
else:
times = dist.index.values
pdf = dist.values.reshape(-1)
pdf_ub = pdf.min()/100 if pdf.min() > 0 else 1e-8
## fit simple exponential time to PDF. Format: [pre-exponent, tau, constant offset]
ub = [1000., 1e5, pdf_ub] ## hard limit: 1e-7
lb = [1e-4, 1e-3, 0]
p0 = [1e-2,3,1e-15]
try:
popt, pcov = cf(exp_decay,times,pdf, p0=p0, bounds = (lb, ub))
perr = np.sqrt(np.diag(pcov))
        except ValueError:
            print('fitting one exponential did not work, falling back to the initial guess')
            # popt, pcov = cf(exp_decay,times,pdf, p0=[1e-2,0.5,1e-10], bounds = (lb, ub))
            # perr = np.sqrt(np.diag(pcov))
            popt = p0
            perr = np.full(len(p0), np.nan)  ## no covariance available for the fallback guess
if method == 'simple' :
if 'verbose' in kwargs.keys() and kwargs['verbose'] : return popt, perr
else : return popt[1], perr[1]
else:
## fit stretch tail with a break, p0 = [c0, tau, tstar, beta=1, c1=0]
ub = [1000., popt[1]*100., 2000., 1, pdf_ub] ## hard limit: 1e-7
lb = [1e-4, popt[1]*0.1, 0.1, 0, 0]
p0=[1e-2,popt[1],5,0.9,1e-15]
# print('lb:', lb)
# print('p0:', p0)
# print('ub:', ub)
popt, pcov = cf(kww_decay_break,times,pdf,
p0=p0, bounds = (lb, ub),
max_nfev=1e4)
perr = np.sqrt(np.diag(pcov))
if 'verbose' in kwargs.keys() and kwargs['verbose'] : return popt, perr
else : return popt[1], perr[1]
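# =============================================================================
# %% illustrative usage sketch (added): expectation value of a residence-time
# ## distribution. Synthetic data: exponential residence times (tau = 5) rounded
# ## to integer values so that the groupby-based PDF above is meaningful.
# =============================================================================
def _demo_expectation_multi_method():
    '''Minimal sketch comparing the raw mean to the fitted simple-exponential tau.'''
    rng = np.random.default_rng(0)
    dist = pd.Series(np.round(rng.exponential(scale=5., size=20000), 0), name='residence_time')
    mean, std = expectation_multi_method(dist, 'mean')
    tau, tau_err = expectation_multi_method(dist, 'simple')
    print(f'mean: {mean:.2f} +/- {std:.2f}, fitted tau: {tau:.2f} +/- {tau_err:.2f}')
    return tau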
# =============================================================================
# %% functions to query z
# =============================================================================
# =============================================================================
# %% correlation factor
# =============================================================================
def avg_cos_hop(rev_hops):
''' honeycomb lattice <cos theta> '''
return (-1. * len(rev_hops[rev_hops==True]) + 0.5 * len(rev_hops[rev_hops==False]))/len(rev_hops)
def corr_factor(rev_hops):
cos_theta = avg_cos_hop(rev_hops)
return (1.+cos_theta)/(1.-cos_theta)
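# =============================================================================
# %% illustrative usage sketch (added): correlation factor from flagged hops
# ## Hypothetical input: a boolean array marking which hops were reversed.
# =============================================================================
def _demo_corr_factor():
    '''Minimal sketch: 30% back-hops on the honeycomb lattice.'''
    rev_hops = np.array([True] * 30 + [False] * 70)
    print(f'<cos theta> = {avg_cos_hop(rev_hops):.3f}, f = {corr_factor(rev_hops):.3f}')
    return corr_factor(rev_hops)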
# =============================================================================
# %% parse LAMMPS output with multiple RDFs
# ## file structure: 2 lines of comments, then each step with number-of-rows,
# ## then that many rows: center of bin, then rdf, then coordination
# ## TODO: merge this with the standalone parse_rdf file
# =============================================================================
def parse_rdf(filepath):
bins = list()
steps = list()
first_rdf = list()
with open(filepath, 'r') as fp:
## read the first three sets of comments
line = fp.readline() #; print line[:-1]
line = fp.readline() #; print line[:-1]
line = fp.readline() #; print line[:-1]
## read first line with first time step and number-of-rows
## take off the endline character and split
line = fp.readline()
arr = line[:-1].split(' ')
steps.append(arr[0])
numrows = s2n(arr[1])
## get first set of bins
for i in range(numrows):
line= fp.readline()
arr = line[:-1].split(' ')
bins.append(s2n(arr[1]))
first_rdf.append(s2n(arr[2]))
## skip cdf / coordination
## check
# print len(bins), len(first_rdf)
## make a pandas dataframe
dfdict = {'bins':np.array(bins), '{}'.format(steps[-1]):first_rdf}
df = pd.DataFrame(data = dfdict)
## read next time step
line = fp.readline()
## loop through all other sets
while(line) :
## parse line with new time step
arr = line[:-1].split(' ')
steps.append(arr[0])
numrows = s2n(arr[1])
rdf = list()
bins = list()
for i in range(numrows):
line= fp.readline()
arr = line[:-1].split(' ')
bins.append(s2n(arr[1]))
rdf.append(s2n(arr[2]))
## skip cdf / coordination
df['{}'.format(steps[-1])] = np.array(rdf)
## check
# if int(steps[-1]) % 1000 == 0:
# print 'done {} ps'.format(int(steps[-1])/1000)
## read next time step
line = fp.readline()
return df.set_index('bins')
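# =============================================================================
# %% illustrative usage sketch (added): reading a multi-step RDF dump
# ## 'rdf_all.txt' is a hypothetical placeholder path with the LAMMPS layout
# ## described in the section header above.
# =============================================================================
def _demo_parse_rdf():
    '''Minimal sketch: load all RDFs and pull out the final time step.'''
    rdfs = parse_rdf('rdf_all.txt')
    last_step = rdfs.columns[-1]
    print(f'{len(rdfs.columns)} time steps read; returning step {last_step}')
    return rdfs[last_step]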
# =============================================================================
# %% parse non-gaussian parameter output of LAMMPS
# ## TODO: update this as it is deprecated.
# =============================================================================
def parse_a2(fpath):
times = list(); r2s = list(); r4s = list(); a2s = list()
# df = pd.DataFrame(columns=['r2','r4','a2'])
with open(fpath,'r') as a2in:
for i in range(3) : a2in.readline() ## skip header
stepline = a2in.readline()
while stepline:
times.append(int(stepline[:-1].split(' ')[0])/1000.)
r2s.append(float(a2in.readline()[:-1].split(' ')[1]))
r4s.append(float(a2in.readline()[:-1].split(' ')[1]))
a2s.append(float(a2in.readline()[:-1].split(' ')[1]))
# df.iloc[int(step)] = {'r2':r2, 'r4':r4, 'a2':a2}
stepline = a2in.readline()
return pd.DataFrame({'time':times, 'r2':r2s, 'r4':r4s, 'a2':a2s}).set_index('time')
# =============================================================================
# %% calculate occupancies of sites properly
# ## counting method: total old time + final time
# =============================================================================
def site_occupancies(data2, sites, BR_sites, total_time=10000., plane_label=None,
r='some r',verbose=False):
'''
    data2: pandas df with columns: time, old_resid_time, new_resid_time, old_is_BR,
           plus the new/old distance columns (new_ox_r / old_ox_r or new_mg_count / old_mg_count)
    sites: (sub)set of sites for which to calculate occupancies
    BR_sites: which sites are BR
    total_time: total simulated time used to normalize the occupancies
    plane_label: object with fields metal, T1 and phase if given
    r: value of the distance / Mg-count column to select; also shown in debug output
    verbose: boolean flag for printing debug statements'''
if plane_label is None : m='M'; T1 = 0; ph = 'beta'
else: m = plane_label.metal; T1 = plane_label.T1; ph = plane_label.phase
## columns with distances : new and old
new_r_col = 'new_ox_r' if ph == 'beta' else 'new_mg_count'
old_r_col = 'old_ox_r' if ph == 'beta' else 'old_mg_count'
## downselect plane & sites
data = data2.query(f'{new_r_col} == @r') ## ignores empties, catches non-hops
data_new = data2.query(f'{new_r_col} == @r & new_cell != old_cell') ## ignores empties, catches non-hops
data_old = data2.query(f'{old_r_col} == @r & old_cell != new_cell') ## ignores empties
data_non = data2.query(f'{old_r_col} == @r & {new_r_col} == @r & old_cell == new_cell')
## count sites
old_sites = set(data.old_cell.unique())
new_sites = set(data.new_cell.unique())
# ions = len(data.query('new_cell != old_cell').ion.unique()) ## cosmetic only
## subdivide data manually
data_new_aBR = data_new[~data_new.new_cell.isin(BR_sites)] ## query overcounts non-hops
data_new_BR = data_new[ data_new.new_cell.isin(BR_sites)]
data_old_BR = data_old[ data_old.old_cell.isin(BR_sites)]
data_old_aBR = data_old[~data_old.old_cell.isin(BR_sites)]
## count empty sites: the series for their zero occupancy will be created later
empties = sites - old_sites - new_sites
empties_BR = empties.intersection(BR_sites)
empties_aBR = empties - empties_BR
if verbose: print(f'\n{m} {T1}K r={r}: {len(data_non)} non-hops, {len(empties)} empties')
## non-hops in time - modified 2020/08/01
# time_non_BR = data_non.query('old_is_BR == True ').groupby('new_cell').new_resid_time.sum()
# time_non_aBR = data_non.query('old_is_BR == False').groupby('new_cell').new_resid_time.sum()
time_non_BR = data_non[ data_non.old_cell.isin(BR_sites)].groupby('new_cell').new_resid_time.sum()
time_non_aBR = data_non[~data_non.old_cell.isin(BR_sites)].groupby('new_cell').new_resid_time.sum()
old_BR_time = data_old_BR.groupby('old_cell').old_resid_time.sum()
old_aBR_time = data_old_aBR.groupby('old_cell').old_resid_time.sum()
## adjust for the final time at a final site. Only one sum() b/c each should be only one hop
final_times_BR = data_new_BR.query('rev_hop != True & rev_hop != False ').groupby('new_cell').new_resid_time.sum()
final_times_aBR = data_new_aBR.query('rev_hop != True & rev_hop != False ').groupby('new_cell').new_resid_time.sum()
## add site-specific values using pandas combine, check lengths.
csum = lambda s1, s2 : s1 + s2 ## need a function that takes two series to pass to df.combine()
total_BR = old_BR_time.combine(final_times_BR, csum, fill_value=0)
total_aBR = old_aBR_time.combine(final_times_aBR,csum, fill_value=0)
## add non-hops with pandas append
if not time_non_BR.empty : total_BR = total_BR.append(time_non_BR)
if not time_non_aBR.empty: total_aBR = total_aBR.append(time_non_aBR)
## create series of zeros for empties and append to the main
if empties_BR: total_BR = total_BR.append(pd.Series(data=0,index=empties_BR))
if empties_aBR: total_aBR = total_aBR.append(pd.Series(data=0, index=empties_aBR))
## check lengths and bounds
if verbose:
print(' BR: {} sites, max={:.2f}, min={:.2f}'.format(len(total_BR), total_BR.max(), total_BR.min()))
print('aBR: {} sites, max={:.2f}, min={:.2f}'.format(len(total_aBR), total_aBR.max(), total_aBR.min()))
## add the radius to make it look like it was just done with groupby calls
    total_BR = pd.DataFrame({'total':total_BR/total_time, new_r_col:r, 'site':'BR'})
import datetime
import dspl2
from flask import (
Flask, render_template, request, Response)
from functools import lru_cache
from icu import SimpleDateFormat
from io import StringIO
import json
import os.path
import pandas as pd
from urllib.parse import urlparse
app = Flask(__name__)
@app.route('/')
def main():
return render_template('dspl2viz.html')
@app.route('/api/measures')
def api_measures():
dataset = request.args.get('dataset')
if dataset is None:
return Response("Dataset not specified", status=400)
try:
getter = dspl2.LocalFileGetter(
os.path.expanduser('~/dspl/samples/bls/unemployment/bls-unemployment.jsonld'))
expander = dspl2.Dspl2JsonLdExpander(getter)
ds = expander.Expand(expandSlices=False)
return Response(json.dumps(ds['measure'], indent=2), mimetype='application/json')
except Exception as e:
app.logger.warn(e)
return Response("Unable to find requested dataset", status=404)
@app.route('/api/dimensions')
def api_dimensions():
dataset = request.args.get('dataset')
if dataset is None:
return Response("Dataset not specified", status=400)
try:
getter = dspl2.HybridFileGetter(dataset)
expander = dspl2.Dspl2JsonLdExpander(getter)
ds = expander.Expand(expandSlices=False, expandDimensions=False)
return Response(json.dumps(ds['dimension'], indent=2), mimetype='application/json')
except Exception as e:
app.logger.warn(e)
return Response("Unable to find requested dataset", status=404)
@app.route('/api/dimension_values')
def api_dimension_values():
dataset = request.args.get('dataset')
if dataset is None:
return Response("Dataset not specified", status=400)
dimension = request.args.get('dimension')
if dimension is None:
return Response("Dimension not specified", status=400)
try:
getter = dspl2.HybridFileGetter(dataset)
expander = dspl2.Dspl2JsonLdExpander(getter)
ds = expander.Expand(expandSlices=False, expandDimensions=True)
for dim in ds['dimension']:
if (dimension == dspl2.GetUrl(dim) or
urlparse(dimension).fragment == urlparse(dspl2.GetUrl(dim)).fragment):
return Response(json.dumps(dim, indent=2), mimetype='application/json')
return Response("Unable to find requested dimension", status=404)
except Exception as e:
app.logger.warn(e)
return Response("Unable to find requested dataset", status=404)
@app.route('/api/slices_for_measure')
def api_slices_for_measure():
dataset = request.args.get('dataset')
if dataset is None:
return Response("Dataset not specified", status=400)
measure = request.args.get('measure')
if measure is None:
return Response("Measure not specified", status=400)
try:
getter = dspl2.HybridFileGetter(dataset)
expander = dspl2.Dspl2JsonLdExpander(getter)
ds = expander.Expand(expandSlices=False, expandDimensions=False)
slices = []
for slice in ds['slice']:
for sliceMeasure in slice['measure']:
if (measure == dspl2.GetUrl(sliceMeasure) or
urlparse(measure).fragment == urlparse(dspl2.GetUrl(sliceMeasure)).fragment):
slices.append(slice)
break
return Response(json.dumps(slices, indent=2),
mimetype='application/json')
except Exception as e:
app.logger.warn(e)
return Response("Unable to find requested dataset", status=404)
@lru_cache(maxsize=10)
def _ExpandDataset(dataset):
getter = dspl2.HybridFileGetter(dataset)
expander = dspl2.Dspl2JsonLdExpander(getter)
return expander.Expand()
def _ParseDate(text, date_pattern):
df = SimpleDateFormat(date_pattern)
ts = df.parse(text)
return datetime.datetime.utcfromtimestamp(ts)
@lru_cache(maxsize=100)
def _GetDataSeries(dataset, slice, measure, dimension_value):
dim_val_dict = dict([dim_val.split(':')
for dim_val in dimension_value.split(',')])
ds = _ExpandDataset(dataset)
# Identify the time dimension's date format
dateFormat = "yyyy-MM-dd" # default
for dimension in ds['dimension']:
if dimension['@type'] == 'TimeDimension':
dateFormat = dimension.get('dateFormat')
break
for dsSlice in ds['slice']:
if urlparse(dsSlice['@id']).fragment == urlparse(slice).fragment:
ret = []
for observation in dsSlice['data']:
val = {}
# Slice should have exactly the requested dims + a time dim:
if len(observation['dimensionValues']) != len(dim_val_dict) + 1:
continue
# All the non-time dims should match the filter:
matched_dims = 0
for dim_val in observation['dimensionValues']:
dim_id = urlparse(dim_val['dimension']).fragment
if f'#{dim_id}' in dim_val_dict:
if dim_val.get('codeValue') == dim_val_dict[f'#{dim_id}']:
val[dim_id] = dim_val.get('codeValue')
matched_dims += 1
elif dim_val.get('value'):
val[dim_id] = _ParseDate(dim_val.get('value'), dateFormat)
if matched_dims != len(dim_val_dict):
continue
for meas_val in observation['measureValues']:
if urlparse(meas_val['measure']).fragment == urlparse(measure).fragment:
val[urlparse(measure).fragment] = meas_val['value']
ret.append(val)
return ret
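# Illustrative usage sketch (added): exercising the series endpoint through
# Flask's test client. The dataset path, slice, measure and dimension value
# below are hypothetical placeholders, not part of any real dspl2 sample.
def _demo_api_series_query():
  with app.test_client() as client:
    resp = client.get('/api/series', query_string={
        'dataset': '/path/to/some-dataset.jsonld',
        'slice': '#countrySlice',
        'measure': '#unemploymentRate',
        'dimension_value': '#country:US',
    })
    return resp.status_code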
@app.route('/api/series')
def api_series():
dataset = request.args.get('dataset')
if dataset is None:
return Response("Dataset not specified", status=400)
slice = request.args.get('slice')
if slice is None:
return Response("Slice not specified", status=400)
measure = request.args.get('measure')
if measure is None:
return Response("Measure not specified", status=400)
dimension_values = request.args.get('dimension_value')
if dimension_values is None:
return Response("Dimension values not specified", status=400)
ret = _GetDataSeries(dataset, slice, measure, dimension_values)
if ret is not None:
out = StringIO()
    pd.DataFrame(ret)
import pandas as pd
from plotly import graph_objs as go
import os
import glob
import shapefile
import datetime as dt
def generate_figure(figure_title, time_series):
"""
Generate a figure from a list of time series Pandas DataFrames.
Args:
figure_title(str): Title of the figure.
time_series(list<pandas.DataFrame>): list of time series Pandas DataFrames.
"""
data = []
yaxis_title = 'No Data'
for index, df in enumerate(time_series):
column_name = df.columns[1]
yaxis_title = column_name
series_name = f'{column_name} {index + 1}' if len(time_series) > 1 else column_name
series_plot = go.Scatter(
            x=pd.to_datetime(df.iloc[:, 0], unit='ms')
import time
import sys
import pandas as pd
from pandas import DataFrame as df
from daqmx_session import DAQmxSession
# Configure Testing Parameters Here
samples = 10000 # samples per trial
trials = 100
device = 'Dev1' # device alias as listed in NI MAX
channel = 'ao1' # for digital tasks use 'port#/line#', for analog 'ao# or ai#'
clk_src = 'OnboardClock' # can accept a physical channel
benchmark_method = 'analog_output' # methods listed in daqmx_session.py
generate_report = True
device_model = "USB-6363" # for documentation only
# Main benchmark function
def benchmark(session, method, device, channel, samples, trials, clk_src):
results = []
trials += 1 # Allows us to remove the first point which is cfg
for trial in range(trials):
sys.stdout.write("Trial Progress: %d of %d \r" % (trial+1, trials))
sys.stdout.flush() # progress indicator
t_start = time.perf_counter()
method(device, channel, samples, trial, clk_src) # calls target method
t_end = time.perf_counter()
t_elapsed = t_end - t_start
results.append(t_elapsed)
session.close() # Close DAQ Session
# print(results[0]) # Uncomment to see first iteration time
results_no_cfg = results[1:]
# Follow code is for console display until print statement
sys.stdout.write("\n\n") # Formatting
    data = df(results_no_cfg, columns=['Time'])
import numpy as np
import pandas as pd
import scipy.stats
import h5py
import pkg_resources
import pybedtools
from DIGDriver.data_tools import mutation_tools
from DIGDriver.sequence_model import nb_model
def load_pretrained_model(h5, key='genic_model', restrict_cols=True):
""" Load a pretrained gene model
"""
## TODO: THIS WILL NEED UPDATING WHEN THE PRETRAINED MODULE IS FINALIZED
df_pretrain = pd.read_hdf(h5, key)
alpha, theta = nb_model.normal_params_to_gamma(df_pretrain.MU, df_pretrain.SIGMA)
df_pretrain['ALPHA'] = alpha
df_pretrain['THETA'] = theta
if key == 'genic_model':
df_pretrain.set_index(df_pretrain.GENE, inplace=True)
## Rename "P" columns (should do this at pretraining level!)
df_pretrain.rename({'P_MIS': 'Pi_MIS',
'P_NONS': 'Pi_NONS',
'P_SILENT': 'Pi_SYN',
'P_SPLICE': 'Pi_SPL',
'P_TRUNC': 'Pi_TRUNC',
'P_INDEL': 'Pi_INDEL',
}, axis=1, inplace=True
)
## Add in nonsynymous relative probability (should do this at pretraining level!)
df_pretrain['Pi_NONSYN'] = df_pretrain.Pi_MIS + df_pretrain.Pi_TRUNC
alpha_ind, theta_ind = nb_model.normal_params_to_gamma(df_pretrain.MU_INDEL, df_pretrain.SIGMA_INDEL)
df_pretrain['ALPHA_INDEL'] = alpha_ind
df_pretrain['THETA_INDEL'] = theta_ind
elif 'P_INDEL' in df_pretrain.columns:
df_pretrain.set_index(df_pretrain.ELT, inplace=True)
df_pretrain.rename({'P_SUM': 'Pi_SUM',
'P_INDEL': 'Pi_INDEL',
}, axis=1, inplace=True
)
alpha_ind, theta_ind = nb_model.normal_params_to_gamma(df_pretrain.MU_INDEL, df_pretrain.SIGMA_INDEL)
df_pretrain['ALPHA_INDEL'] = alpha_ind
df_pretrain['THETA_INDEL'] = theta_ind
else:
df_pretrain.set_index(df_pretrain.ELT, inplace=True)
df_pretrain.rename({'P_SUM': 'Pi_SUM',
}, axis=1, inplace=True
)
if restrict_cols:
if key == 'genic_model':
cols = ['CHROM', 'GENE_LENGTH', 'R_SIZE', 'R_OBS', 'R_INDEL',
'MU', 'SIGMA', 'ALPHA', 'THETA',
'MU_INDEL', 'SIGMA_INDEL', 'ALPHA_INDEL', 'THETA_INDEL', 'FLAG',
'Pi_SYN' ,'Pi_MIS', 'Pi_NONS', 'Pi_SPL', 'Pi_TRUNC', 'Pi_NONSYN', 'Pi_INDEL']
# elif "spliceAI" in key or "sites" in key:
elif 'Pi_INDEL' in df_pretrain.columns:
cols = ['ELT_SIZE', 'FLAG', 'R_SIZE', 'R_OBS', 'R_INDEL',
'MU', 'SIGMA', 'ALPHA', 'THETA',
'MU_INDEL', 'SIGMA_INDEL', 'ALPHA_INDEL', 'THETA_INDEL',
'Pi_SUM', 'Pi_INDEL'
]
else:
cols = ['R_OBS', 'MU', 'SIGMA', 'ALPHA', 'THETA', 'Pi_SUM']
# else:
# cols = ['CHROM', 'R_OBS', 'MU', 'SIGMA', 'ALPHA', 'THETA', 'Pi_SUM']
df_pretrain = df_pretrain[cols]
return df_pretrain
def read_mutations_cds(f_mut, f_cds=None):
""" Read mutations from a WES cohort annotated by DIGPreprocess
"""
# df_bed['LENGTH'] = df_bed.END - df_bed.START
df_mut = mutation_tools.read_mutation_file(f_mut, drop_duplicates=False, drop_sex=True)
df_mut_cds = df_mut[df_mut.GENE != '.']
# df_mut_cds = df_mut[df_mut.ANNOT != 'Noncoding']
if f_cds:
df_cds = pd.read_table(f_cds, names=['CHROM', 'START', 'END', 'GENE'], low_memory=False)
df_mut_cds = mutation_tools.restrict_mutations_by_bed(df_mut_cds, df_cds,
unique=True, replace_cols=True, remove_X=False
)
return df_mut_cds
def calc_scale_factor(df_mut, h5_pretrain, scale_type='genome'):
""" General purpose function to calculate cohort scaling factor
"""
df_dedup = mutation_tools.drop_duplicate_mutations(df_mut)
with h5py.File(h5_pretrain, 'r') as h5:
if scale_type == 'genome':
idx = h5['idx'][:]
mapp = h5['mappability'][:]
mapp_thresh = h5.attrs['mappability_threshold']
idx_mapp = idx[mapp > mapp_thresh]
df_idx = pd.DataFrame(idx_mapp, columns=['CHROM', 'START', 'END'])
df_inter = mutation_tools.restrict_mutations_by_bed(df_dedup, df_idx, remove_X=False)
N_MUT_GENOME = len(df_inter)
N_MUT_TRAIN = h5.attrs['N_MUT_TRAIN']
return N_MUT_GENOME / N_MUT_TRAIN
elif scale_type == 'exome':
N_MUT_EXOME = len(df_dedup[df_dedup.ANNOT != 'Noncoding'])
N_MUT_TRAIN = h5.attrs['N_MUT_CDS']
return N_MUT_EXOME / N_MUT_TRAIN
elif scale_type == 'sample':
N_SAMPLE = len(df_dedup.SAMPLE.unique())
N_SAMPLE_TRAIN = h5.attrs['N_SAMPLES']
return N_SAMPLE / N_SAMPLE_TRAIN
else:
raise ValueError("scale_type {} is not recognized".format(scale_type))
def calc_scale_factor_efficient(f_mut, h5_pretrain, scale_type='genome'):
""" General purpose function to calculate cohort scaling factor
"""
# df_dedup = mutation_tools.drop_duplicate_mutations(df_mut)
with h5py.File(h5_pretrain, 'r') as h5:
if scale_type == 'genome':
# idx = h5['idx'][:]
# mapp = h5['mappability'][:]
# mapp_thresh = h5.attrs['mappability_threshold']
# idx_mapp = idx[mapp > mapp_thresh]
# df_idx = pd.DataFrame(idx_mapp, columns=['CHROM', 'START', 'END'])
# bed_idx = pybedtools.BedTool.from_dataframe(df_idx)
# # df_inter = mutation_tools.restrict_mutations_by_bed(df_dedup, df_idx, remove_X=False)
# df_inter = mutation_tools.restrict_mutations_by_bed_efficient(f_mut, bed_idx.fn, bed12=False, drop_duplicates=True)
# N_MUT_GENOME = len(df_inter)
# N_MUT_TRAIN = h5.attrs['N_MUT_TRAIN']
regions = pd.read_hdf(h5_pretrain, 'region_params')
regions_pass = regions[~regions.FLAG]
bed_idx = pybedtools.BedTool.from_dataframe(regions_pass[['CHROM', 'START', 'END']])
df_inter = mutation_tools.restrict_mutations_by_bed_efficient(f_mut, bed_idx.fn, bed12=False, drop_duplicates=True)
N_SNV_EXP = regions_pass.Y_PRED.sum()
N_SNV_OBS = len(df_inter[df_inter.ANNOT != 'INDEL'])
N_IND_OBS = len(df_inter[df_inter.ANNOT == 'INDEL'])
cj_snv = N_SNV_OBS / N_SNV_EXP
cj_ind = N_IND_OBS / N_SNV_EXP
return cj_snv, cj_ind
# elif scale_type == 'exome':
# df_dedup = mutation_tools.read_mutation_file(f_mut, drop_duplicates=True)
# N_MUT_EXOME = len(df_dedup[df_dedup.ANNOT != 'Noncoding'])
# N_MUT_TRAIN = h5.attrs['N_MUT_CDS']
# return N_MUT_EXOME / N_MUT_TRAIN
# elif scale_type == 'sample':
# df_dedup = mutation_tools.read_mutation_file(f_mut, drop_duplicates=True)
# N_SAMPLE = len(df_dedup.SAMPLE.unique())
# N_SAMPLE_TRAIN = h5.attrs['N_SAMPLES']
# return N_SAMPLE / N_SAMPLE_TRAIN
else:
raise ValueError("scale_type {} is not recognized".format(scale_type))
def scale_factor_by_cds(h5_pretrain, df_mut_cds):
""" Calculate a cohort scaling factor based on the number of mutations
in the CDS regions of a cohort
"""
with h5py.File(h5_pretrain, 'r') as h5:
N_MUT_CDS = h5.attrs['N_MUT_CDS']
return len(df_mut_cds) / N_MUT_CDS
def scale_factor_by_samples(h5_pretrain, df_mut):
""" Calculate a cohort scaling factor based on the number of samples
in the target and reference cohorts
"""
with h5py.File(h5_pretrain, 'r') as h5:
N_SAMPLES = h5.attrs['N_SAMPLES']
return len(df_mut.SAMPLE.unique()) / N_SAMPLES
def transfer_gene_model(df_mut_cds, df_counts, df_pretrain, cj):
""" Transfer a pretrained gene model to a new cohort
Args:
df_mut_cds: dataframe of cds mutations from new cohort
df_pretrain: dataframe of pretrained gene model parameters
cj: scaling factor for transfer model
"""
## 1. Count mutations per gene
# df_counts = _mutations_per_gene(df_mut_cds)
# df_counts = pd.crosstab(df_mut_cds.GENE, df_mut_cds.ANNOT)
# df_counts.rename({'Missense': 'OBS_MIS',
# 'Nonsense': 'OBS_NONS',
# 'Synonymous': 'OBS_SYN',
# 'Essential_Splice': 'OBS_SPL'
# },
# axis=1, inplace=True
# )
cols_left = ['CHROM', 'GENE_LENGTH', 'R_SIZE', 'R_OBS', 'R_INDEL',
'MU', 'SIGMA', 'ALPHA', 'THETA',
'MU_INDEL', 'SIGMA_INDEL', 'ALPHA_INDEL', 'THETA_INDEL', 'FLAG',
'Pi_SYN' ,'Pi_MIS', 'Pi_NONS', 'Pi_SPL', 'Pi_TRUNC', 'Pi_NONSYN', 'Pi_INDEL']
cols_right = ['OBS_SYN', 'OBS_MIS', 'OBS_NONS', 'OBS_SPL', 'OBS_INDEL']
df_model = df_pretrain[cols_left].merge(df_counts[cols_right],
left_index=True, right_index=True, how='left'
)
## Fill nans
df_model.loc[df_model.OBS_MIS.isna(), 'OBS_MIS'] = 0
df_model.loc[df_model.OBS_NONS.isna(), 'OBS_NONS'] = 0
df_model.loc[df_model.OBS_SPL.isna(), 'OBS_SPL'] = 0
df_model.loc[df_model.OBS_SYN.isna(), 'OBS_SYN'] = 0
df_model.loc[df_model.OBS_INDEL.isna(), 'OBS_INDEL'] = 0
df_model['OBS_TRUNC'] = df_model.OBS_NONS + df_model.OBS_SPL
df_model['OBS_NONSYN'] = df_model.OBS_MIS + df_model.OBS_TRUNC
## 2. Count number of mutated samples per gene
df_syn = df_mut_cds[df_mut_cds.ANNOT == 'Synonymous']
df_mis = df_mut_cds[df_mut_cds.ANNOT == 'Missense']
df_non = df_mut_cds[df_mut_cds.ANNOT == 'Nonsense']
df_spl = df_mut_cds[df_mut_cds.ANNOT == 'Essential_Splice']
df_trunc = df_mut_cds[df_mut_cds.ANNOT.isin(['Nonsense', 'Essential_Splice'])]
df_nonsyn = df_mut_cds[df_mut_cds.ANNOT.isin(['Missense', 'Nonsense', 'Essential_Splice'])]
df_indel = df_mut_cds[df_mut_cds.ANNOT == 'INDEL']
sample_syn_cnt = df_syn.groupby(['GENE', 'SAMPLE']).size().reset_index(name='CNT').GENE.value_counts()
sample_mis_cnt = df_mis.groupby(['GENE', 'SAMPLE']).size().reset_index(name='CNT').GENE.value_counts()
sample_non_cnt = df_non.groupby(['GENE', 'SAMPLE']).size().reset_index(name='CNT').GENE.value_counts()
sample_spl_cnt = df_spl.groupby(['GENE', 'SAMPLE']).size().reset_index(name='CNT').GENE.value_counts()
sample_trunc_cnt = df_trunc.groupby(['GENE', 'SAMPLE']).size().reset_index(name='CNT').GENE.value_counts()
sample_nonsyn_cnt = df_nonsyn.groupby(['GENE', 'SAMPLE']).size().reset_index(name='CNT').GENE.value_counts()
sample_indel_cnt = df_indel.groupby(['GENE', 'SAMPLE']).size().reset_index(name='CNT').GENE.value_counts()
df_model['N_SAMP_SYN'] = 0
df_model['N_SAMP_MIS'] = 0
df_model['N_SAMP_NONS'] = 0
df_model['N_SAMP_SPL'] = 0
df_model['N_SAMP_TRUNC'] = 0
df_model['N_SAMP_NONSYN'] = 0
df_model['N_SAMP_INDEL'] = 0
df_model.loc[sample_syn_cnt.index, 'N_SAMP_SYN'] = sample_syn_cnt
df_model.loc[sample_mis_cnt.index, 'N_SAMP_MIS'] = sample_mis_cnt
df_model.loc[sample_non_cnt.index, 'N_SAMP_NONS'] = sample_non_cnt
df_model.loc[sample_spl_cnt.index, 'N_SAMP_SPL'] = sample_spl_cnt
df_model.loc[sample_trunc_cnt.index, 'N_SAMP_TRUNC'] = sample_trunc_cnt
df_model.loc[sample_nonsyn_cnt.index, 'N_SAMP_NONSYN'] = sample_nonsyn_cnt
df_model.loc[sample_indel_cnt.index, 'N_SAMP_INDEL'] = sample_indel_cnt
## Scale theta
df_model.THETA = df_model.THETA * cj
return df_model
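# Illustrative sketch (added) of the intended end-to-end transfer flow.
def _demo_transfer_gene_model():
    """ Illustrative sketch (added): 'pretrained.h5' and 'cohort_mutations.txt'
        are hypothetical placeholder paths; the crosstab/rename below mirrors
        the commented-out counting code inside transfer_gene_model().
    """
    df_pretrain = load_pretrained_model('pretrained.h5', key='genic_model')
    df_mut_cds = read_mutations_cds('cohort_mutations.txt')
    cj = scale_factor_by_cds('pretrained.h5', df_mut_cds)
    df_counts = pd.crosstab(df_mut_cds.GENE, df_mut_cds.ANNOT).rename(
        {'Missense': 'OBS_MIS', 'Nonsense': 'OBS_NONS', 'Synonymous': 'OBS_SYN',
         'Essential_Splice': 'OBS_SPL', 'INDEL': 'OBS_INDEL'}, axis=1)
    df_model = transfer_gene_model(df_mut_cds, df_counts, df_pretrain, cj)
    return gene_pvalue_burden_nb(gene_expected_muts_nb(df_model))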
def transfer_element_model_with_indels(df_mut_tab, df_pretrain, cj, use_chrom=False):
""" Transfer a pretrained gene model to a new cohort
Args:
df_mut_tab: dataframe of mutations tabulated by element
df_pretrain: dataframe of pretrained element model
cj: scaling factor for transfer model
"""
if use_chrom:
cols_left = ['CHROM', 'R_OBS', 'MU', 'SIGMA', 'ALPHA', 'THETA', 'Pi_SUM']
else:
cols_left = ['ELT_SIZE', 'FLAG', 'R_SIZE', 'R_OBS', 'R_INDEL',
'MU', 'SIGMA', 'ALPHA', 'THETA',
'MU_INDEL', 'SIGMA_INDEL', 'ALPHA_INDEL', 'THETA_INDEL',
'Pi_SUM', 'Pi_INDEL'
]
cols_right = ['OBS_SAMPLES', 'OBS_SNV', 'OBS_INDEL']
# cols_right = ['OBS_SAMPLES', 'OBS_MUT']
df_model = df_pretrain[cols_left].merge(df_mut_tab[cols_right],
left_index=True, right_index=True, how='left'
)
df_model.loc[df_model.OBS_SNV.isna(), 'OBS_SNV'] = 0
df_model.loc[df_model.OBS_INDEL.isna(), 'OBS_INDEL'] = 0
df_model.loc[df_model.OBS_SAMPLES.isna(), 'OBS_SAMPLES'] = 0
## Scale theta
df_model.THETA = df_model.THETA * cj
return df_model
def transfer_element_model(df_mut_tab, df_pretrain, cj, use_chrom=False):
""" Transfer a pretrained gene model to a new cohort
Args:
df_mut_tab: dataframe of mutations tabulated by element
df_pretrain: dataframe of pretrained element model
cj: scaling factor for transfer model
"""
if use_chrom:
cols_left = ['CHROM', 'R_OBS', 'MU', 'SIGMA', 'ALPHA', 'THETA', 'Pi_SUM']
else:
cols_left = ['R_OBS', 'MU', 'SIGMA', 'ALPHA', 'THETA', 'Pi_SUM']
cols_right = ['OBS_SAMPLES', 'OBS_SNV']
# cols_right = ['OBS_SAMPLES', 'OBS_MUT']
df_model = df_pretrain[cols_left].merge(df_mut_tab[cols_right],
left_index=True, right_index=True, how='left'
)
df_model.loc[df_model.OBS_SNV.isna(), 'OBS_SNV'] = 0
df_model.loc[df_model.OBS_SAMPLES.isna(), 'OBS_SAMPLES'] = 0
## Scale theta
df_model.THETA = df_model.THETA * cj
return df_model
def gene_expected_muts_nb(df_model):
""" Calculated expected mutations in genes based on transferred NB model
"""
df_model['EXP_SYN'] = df_model.ALPHA * df_model.THETA * df_model.Pi_SYN
df_model['EXP_MIS'] = df_model.ALPHA * df_model.THETA * df_model.Pi_MIS
df_model['EXP_NONS'] = df_model.ALPHA * df_model.THETA * df_model.Pi_NONS
df_model['EXP_SPL'] = df_model.ALPHA * df_model.THETA * df_model.Pi_SPL
df_model['EXP_TRUNC'] = df_model.ALPHA * df_model.THETA * df_model.Pi_TRUNC
df_model['EXP_NONSYN'] = df_model.ALPHA * df_model.THETA * df_model.Pi_NONSYN
return df_model
def element_expected_muts_nb(df_model):
df_model['EXP_SNV'] = df_model.ALPHA * df_model.THETA * df_model.Pi_SUM
# OBS_MUT = df_model.OBS_MUT.values.copy()
# OBS_SAMPLES = df_model.OBS_SAMPLES.values.copy()
# OBS_SAMPLES[OBS_MUT == 0] = 1
# OBS_MUT[OBS_MUT == 0] = 1
# SCALE_SAMPLE = OBS_SAMPLES / OBS_MUT
# df_model['EXP_SAMPLES'] = df_model.ALPHA * df_model.THETA * df_model.Pi_SUM * SCALE_SAMPLE
return df_model
# def _calc_sample_scale(N_SAMP, OBS_MUT):
# if OBS_MUT == 0:
# return 1
# else:
# return N_SAMP / OBS_MUT
def gene_expected_muts_dnds(df_model):
""" Calculate expected mutations in genes using dNdS correction
"""
## Baseline expected mutations from transfer model
df_model['EXP_SYN'] = df_model.ALPHA * df_model.THETA * df_model.Pi_SYN
df_model['EXP_MIS'] = df_model.ALPHA * df_model.THETA * df_model.Pi_MIS
df_model['EXP_NONS'] = df_model.ALPHA * df_model.THETA * df_model.Pi_NONS
df_model['EXP_SPL'] = df_model.ALPHA * df_model.THETA * df_model.Pi_SPL
df_model['EXP_TRUNC'] = df_model.ALPHA * df_model.THETA * df_model.Pi_TRUNC
df_model['EXP_NONSYN'] = df_model.ALPHA * df_model.THETA * df_model.Pi_NONSYN
## MLE estimate of neutral mutation rate
df_model['T_SYN'] = [_mle_t(row.OBS_SYN, 1, row.ALPHA, row.THETA*row.Pi_SYN)
for i, row in df_model.iterrows()
]
## Mutation rate correction factor
df_model['MRFOLD'] = [_mrfold_factor(row.T_SYN, row.EXP_SYN)
for i, row in df_model.iterrows()
]
## Rate-corrected expected mutations
df_model['EXP_SYN_ML'] = df_model.EXP_SYN * df_model.MRFOLD
df_model['EXP_MIS_ML'] = df_model.EXP_MIS * df_model.MRFOLD
df_model['EXP_NONS_ML'] = df_model.EXP_NONS * df_model.MRFOLD
df_model['EXP_SPL_ML'] = df_model.EXP_SPL * df_model.MRFOLD
df_model['EXP_TRUNC_ML'] = df_model.EXP_TRUNC * df_model.MRFOLD
df_model['EXP_NONSYN_ML'] = df_model.EXP_NONSYN * df_model.MRFOLD
return df_model
def gene_pvalue_burden_nb(df_model):
""" Calculate burden P-values based on the transfered NB model params
"""
# PVAL_SYN, PVAL_MIS, PVAL_NONS, PVAL_SPL, PVAL_TRUNC, PVAL_NONSYN = [], [], [], [], [], []
# for i, row in df_model.iterrows():
# PVAL_SYN.append(nb_model.nb_pvalue_greater_midp(row.OBS_SYN, row.ALPHA,
# 1 / (row.THETA * row.Pi_SYN + 1)
# )
# )
# PVAL_MIS.append(nb_model.nb_pvalue_greater_midp(row.OBS_MIS, row.ALPHA,
# 1 / (row.THETA * row.Pi_MIS + 1)
# )
# )
# PVAL_NONS.append(nb_model.nb_pvalue_greater_midp(row.OBS_NONS, row.ALPHA,
# 1 / (row.THETA * row.Pi_NONS + 1)
# )
# )
# PVAL_SPL.append(nb_model.nb_pvalue_greater_midp(row.OBS_SPL, row.ALPHA,
# 1 / (row.THETA * row.Pi_SPL + 1)
# )
# )
# PVAL_TRUNC.append(nb_model.nb_pvalue_greater_midp(row.OBS_TRUNC, row.ALPHA,
# 1 / (row.THETA * row.Pi_TRUNC + 1)
# )
# )
# PVAL_NONSYN.append(nb_model.nb_pvalue_greater_midp(row.OBS_NONSYN, row.ALPHA,
# 1 / (row.THETA * row.Pi_NONSYN + 1)
# )
# )
df_model['PVAL_SYN_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_SYN,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_SYN + 1)
)
df_model['PVAL_MIS_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_MIS,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_MIS + 1)
)
df_model['PVAL_NONS_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_NONS,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_NONS + 1)
)
df_model['PVAL_SPL_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_SPL,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_SPL + 1)
)
df_model['PVAL_TRUNC_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_TRUNC,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_TRUNC + 1)
)
df_model['PVAL_NONSYN_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_NONSYN,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_NONSYN + 1)
)
return df_model
def element_pvalue_burden_nb_DEPRECATED(df_model):
""" Calculate burden P-values based on the transfered NB model params
"""
PVAL_SNV = []
for i, row in df_model.iterrows():
PVAL_SNV.append(nb_model.nb_pvalue_greater_midp(row.OBS_SNV, row.ALPHA,
1 / (row.THETA * row.Pi_SUM + 1)
)
)
df_model['PVAL_SNV_BURDEN'] = PVAL_SNV
return df_model
def element_pvalue_burden_nb(df_model):
""" Calculate burden P-values based on the transfered NB model params
"""
df_model['PVAL_SNV_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_SNV,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_SUM + 1)
)
return df_model
def gene_pvalue_burden_nb_by_sample(df_model):
""" Calculate burden P-values based on the transfered NB model params.
Test based only on the number of *mutated* samples per gene
"""
# def _calc_sample_scale(OBS_MUT, N_SAMP):
# def _calc_sample_scale(N_SAMP, OBS_MUT):
# if OBS_MUT == 0:
# return 1
# else:
# return N_SAMP / OBS_MUT
# PVAL_SYN, PVAL_MIS, PVAL_NONS, PVAL_SPL, PVAL_TRUNC, PVAL_NONSYN = [], [], [], [], [], []
# # C_SYN, C_MIS, C_NONS, C_SPL, C_TRUNC, C_NONSYN = [], [], [], [], [], []
# for i, row in df_model.iterrows():
# # c_syn = _calc_sample_scale(row.N_SAMP_SYN, row.OBS_SYN)
# PVAL_SYN.append(nb_model.nb_pvalue_greater_midp(row.N_SAMP_SYN, row.ALPHA,
# 1 / (row.THETA * row.Pi_SYN + 1)
# # 1 / (row.THETA * row.Pi_SYN * c_syn + 1)
# )
# )
# # C_SYN.append(c_syn)
# # c_mis = _calc_sample_scale(row.N_SAMP_MIS, row.OBS_MIS)
# PVAL_MIS.append(nb_model.nb_pvalue_greater_midp(row.N_SAMP_MIS, row.ALPHA,
# 1 / (row.THETA * row.Pi_MIS + 1)
# # 1 / (row.THETA * row.Pi_MIS * c_mis + 1)
# )
# )
# # C_MIS.append(c_mis)
# # c_nons = _calc_sample_scale(row.N_SAMP_NONS, row.OBS_NONS)
# PVAL_NONS.append(nb_model.nb_pvalue_greater_midp(row.N_SAMP_NONS, row.ALPHA,
# 1 / (row.THETA * row.Pi_NONS + 1)
# # 1 / (row.THETA * row.Pi_NONS * c_nons + 1)
# )
# )
# # C_NONS.append(c_nons)
# # c_spl = _calc_sample_scale(row.N_SAMP_SPL, row.OBS_SPL)
# PVAL_SPL.append(nb_model.nb_pvalue_greater_midp(row.N_SAMP_SPL, row.ALPHA,
# 1 / (row.THETA * row.Pi_SPL + 1)
# # 1 / (row.THETA * row.Pi_SPL * c_spl + 1)
# )
# )
# # C_SPL.append(c_spl)
# # c_trunc = _calc_sample_scale(row.N_SAMP_TRUNC, row.OBS_TRUNC)
# PVAL_TRUNC.append(nb_model.nb_pvalue_greater_midp(row.N_SAMP_TRUNC, row.ALPHA,
# 1 / (row.THETA * row.Pi_TRUNC + 1)
# # 1 / (row.THETA * row.Pi_TRUNC * c_trunc + 1)
# )
# )
# # C_TRUNC.append(c_trunc)
# # c_nonsyn = _calc_sample_scale(row.N_SAMP_NONSYN, row.OBS_NONSYN)
# PVAL_NONSYN.append(nb_model.nb_pvalue_greater_midp(row.N_SAMP_NONSYN, row.ALPHA,
# 1 / (row.THETA * row.Pi_NONSYN + 1)
# # 1 / (row.THETA * row.Pi_NONSYN * c_nonsyn + 1)
# )
# )
# # C_NONSYN.append(c_nonsyn)
# df_model['PVAL_SYN_BURDEN_SAMPLE'] = PVAL_SYN
# df_model['PVAL_MIS_BURDEN_SAMPLE'] = PVAL_MIS
# df_model['PVAL_NONS_BURDEN_SAMPLE'] = PVAL_NONS
# df_model['PVAL_SPL_BURDEN_SAMPLE'] = PVAL_SPL
# df_model['PVAL_TRUNC_BURDEN_SAMPLE'] = PVAL_TRUNC
# df_model['PVAL_NONSYN_BURDEN_SAMPLE'] = PVAL_NONSYN
df_model['PVAL_SYN_BURDEN_SAMPLE'] = nb_model.nb_pvalue_greater_midp(
df_model.N_SAMP_SYN,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_SYN + 1)
)
df_model['PVAL_MIS_BURDEN_SAMPLE'] = nb_model.nb_pvalue_greater_midp(
df_model.N_SAMP_MIS,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_MIS + 1)
)
df_model['PVAL_NONS_BURDEN_SAMPLE'] = nb_model.nb_pvalue_greater_midp(
df_model.N_SAMP_NONS,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_NONS + 1)
)
df_model['PVAL_SPL_BURDEN_SAMPLE'] = nb_model.nb_pvalue_greater_midp(
df_model.N_SAMP_SPL,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_SPL + 1)
)
df_model['PVAL_TRUNC_BURDEN_SAMPLE'] = nb_model.nb_pvalue_greater_midp(
df_model.N_SAMP_TRUNC,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_TRUNC + 1)
)
df_model['PVAL_NONSYN_BURDEN_SAMPLE'] = nb_model.nb_pvalue_greater_midp(
df_model.N_SAMP_NONSYN,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_NONSYN + 1)
)
# df_model['C_SYN_BURDEN_SAMPLE'] = C_SYN
# df_model['C_MIS_BURDEN_SAMPLE'] = C_MIS
# df_model['C_NONS_BURDEN_SAMPLE'] = C_NONS
# df_model['C_SPL_BURDEN_SAMPLE'] = C_SPL
# df_model['C_TRUNC_BURDEN_SAMPLE'] = C_TRUNC
# df_model['C_NONSYN_BURDEN_SAMPLE'] = C_NONSYN
return df_model
def element_pvalue_burden_nb_by_sample(df_model):
""" Calculate burden P-values based on the transfered NB model params.
Test based only on the number of *mutated* samples per gene
"""
# PVAL_MUT = []
# for i, row in df_model.iterrows():
# # c_mut = _calc_sample_scale(row.OBS_SAMPLES, row.OBS_MUT)
# PVAL_MUT.append(nb_model.nb_pvalue_greater_midp(row.OBS_SAMPLES, row.ALPHA,
# 1 / (row.THETA * row.Pi_SUM + 1)
# # 1 / (row.THETA * row.Pi_SUM * c_mut + 1)
# )
# )
# df_model['PVAL_SAMPLE_BURDEN'] = PVAL_MUT
df_model['PVAL_SAMPLE_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_SAMPLES,
df_model.ALPHA,
1 / (df_model.THETA * df_model.Pi_SUM + 1)
)
return df_model
def gene_pvalue_burden_dnds(df_model):
""" Calculate burden P-values based on the dnds-corrected expected values
"""
PVAL_SYN, PVAL_MIS, PVAL_NONS, PVAL_SPL, PVAL_TRUNC, PVAL_NONSYN = [], [], [], [], [], []
for i, row in df_model.iterrows():
PVAL_SYN.append(nb_model.nb_pvalue_greater_midp(row.OBS_SYN, row.ALPHA,
1 / (row.EXP_SYN_ML / row.ALPHA + 1)
)
)
PVAL_MIS.append(nb_model.nb_pvalue_greater_midp(row.OBS_MIS, row.ALPHA,
1 / (row.EXP_MIS_ML / row.ALPHA + 1)
)
)
PVAL_NONS.append(nb_model.nb_pvalue_greater_midp(row.OBS_NONS, row.ALPHA,
1 / (row.EXP_NONS_ML / row.ALPHA + 1)
)
)
PVAL_SPL.append(nb_model.nb_pvalue_greater_midp(row.OBS_SPL, row.ALPHA,
1 / (row.EXP_SPL_ML / row.ALPHA + 1)
)
)
PVAL_TRUNC.append(nb_model.nb_pvalue_greater_midp(row.OBS_TRUNC, row.ALPHA,
1 / (row.EXP_TRUNC_ML / row.ALPHA + 1)
)
)
PVAL_NONSYN.append(nb_model.nb_pvalue_greater_midp(row.OBS_NONSYN, row.ALPHA,
1 / (row.EXP_NONSYN_ML / row.ALPHA + 1)
)
)
df_model['PVAL_SYN_BURDEN_DNDS'] = PVAL_SYN
df_model['PVAL_MIS_BURDEN_DNDS'] = PVAL_MIS
df_model['PVAL_NONS_BURDEN_DNDS'] = PVAL_NONS
df_model['PVAL_SPL_BURDEN_DNDS'] = PVAL_SPL
df_model['PVAL_TRUNC_BURDEN_DNDS'] = PVAL_TRUNC
df_model['PVAL_NONSYN_BURDEN_DNDS'] = PVAL_NONSYN
return df_model
def gene_pvalue_sel_nb(df_model):
""" Calculate dNdS selection p-values using a conservative NB model
(NB model integrates over uncertainty in the rate estimate)
"""
PVAL_SYN, PVAL_MIS, PVAL_TRUNC, PVAL_NONSYN = [], [], [], []
for i, row in df_model.iterrows():
p_syn, p_mis, p_trunc, p_nonsyn = _llr_test_nb(row)
PVAL_SYN.append(p_syn)
PVAL_MIS.append(p_mis)
PVAL_TRUNC.append(p_trunc)
PVAL_NONSYN.append(p_nonsyn)
df_model['PVAL_SYN_SEL_NB'] = PVAL_SYN
df_model['PVAL_MIS_SEL_NB'] = PVAL_MIS
# df_model['PVAL_NONS_SEL_NB'] = PVAL_NONS
# df_model['PVAL_SPL_SEL_NB'] = PVAL_NONS
df_model['PVAL_TRUNC_SEL_NB'] = PVAL_TRUNC
df_model['PVAL_NONSYN_SEL_NB'] = PVAL_NONSYN
return df_model
def gene_pvalue_indel_by_transfer(df_model):
## Length of genes
df_cds = pd.read_table(
pkg_resources.resource_filename('DIGDriver', 'data/dndscv_gene_cds.bed.gz'),
names=['CHROM', 'START', 'END', 'GENE'],
low_memory=False
)
df_cds['LENGTH'] = df_cds.END - df_cds.START
df_cds_l = df_cds.pivot_table(index='GENE', values='LENGTH', aggfunc=np.sum)
df_model = df_model.merge(df_cds_l['LENGTH'], left_index=True, right_index=True, how='left')
## Probability of indels within each gene under uniform distribution
df_model['Pi_INDEL'] = df_model.LENGTH / (df_model.R_SIZE)
## Non CGC genes for scaling factor
f_panel = 'data/genes_CGC_ALL.txt'
df_genes = pd.read_table(pkg_resources.resource_stream('DIGDriver', f_panel), names=['GENE'])
all_cosmic = df_genes.GENE.to_list() + ['CDKN2A.p14arf', 'CDKN2A.p16INK4a']
df_model_null = df_model[~df_model.index.isin(all_cosmic)]
## Expected Uniform indel rate
EXP_INDEL_UNIF = (df_model_null.Pi_INDEL * df_model_null.ALPHA * df_model_null.THETA).sum()
OBS_INDEL = df_model_null.OBS_INDEL.sum()
t_indel = OBS_INDEL / EXP_INDEL_UNIF
df_model['THETA_INDEL'] = df_model.THETA * t_indel
df_model['EXP_INDEL'] = df_model.ALPHA * df_model.THETA_INDEL * df_model.Pi_INDEL
df_model['PVAL_INDEL_BURDEN'] = [nb_model.nb_pvalue_greater_midp(row.OBS_INDEL, row.ALPHA, 1 / (row.THETA_INDEL*row.Pi_INDEL + 1)) for i, row in df_model.iterrows()]
return df_model
def gene_pvalue_indel(df_model):
f_panel = 'data/genes_CGC_ALL.txt'
df_genes = pd.read_table(pkg_resources.resource_stream('DIGDriver', f_panel), names=['GENE'])
all_cosmic = df_genes.GENE.to_list() + ['CDKN2A.p14arf', 'CDKN2A.p16INK4a']
df_model_null = df_model[~df_model.index.isin(all_cosmic)]
## Expected Uniform indel rate
EXP_INDEL_UNIF = (df_model_null.Pi_INDEL * df_model_null.ALPHA_INDEL * df_model_null.THETA_INDEL).sum()
OBS_INDEL = df_model_null.OBS_INDEL.sum()
t_indel = OBS_INDEL / EXP_INDEL_UNIF
df_model['THETA_INDEL'] = df_model.THETA_INDEL * t_indel
df_model['EXP_INDEL'] = df_model.ALPHA_INDEL * df_model.THETA_INDEL * df_model.Pi_INDEL
# df_model['PVAL_INDEL_BURDEN'] = [nb_model.nb_pvalue_greater_midp(row.OBS_INDEL, row.ALPHA_INDEL, 1 / (row.THETA_INDEL*row.Pi_INDEL + 1)) for i, row in df_model.iterrows()]
df_model['PVAL_INDEL_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_INDEL,
df_model.ALPHA_INDEL,
1 / (df_model.THETA_INDEL * df_model.Pi_INDEL + 1)
)
return df_model
def element_pvalue_indel(df_model, t_indel):
# EXP_INDEL_UNIF = (df_model.Pi_INDEL * df_model.ALPHA_INDEL * df_model.THETA_INDEL).sum()
# OBS_INDEL = df_model.OBS_INDEL.sum()
# t_indel_bck = OBS_INDEL / EXP_INDEL_UNIF
# print(t_indel_bck)
df_model['THETA_INDEL'] = df_model.THETA_INDEL * t_indel
df_model['EXP_INDEL'] = df_model.ALPHA_INDEL * df_model.THETA_INDEL * df_model.Pi_INDEL
# df_model['PVAL_INDEL_BURDEN'] = [nb_model.nb_pvalue_greater_midp(row.OBS_INDEL, row.ALPHA_INDEL, 1 / (row.THETA_INDEL*row.Pi_INDEL + 1)) for i, row in df_model.iterrows()]
df_model['PVAL_INDEL_BURDEN'] = nb_model.nb_pvalue_greater_midp(
df_model.OBS_INDEL,
df_model.ALPHA_INDEL,
1 / (df_model.THETA_INDEL * df_model.Pi_INDEL + 1)
)
return df_model
def gene_pvalue_sel_gamma(df_model):
""" Calculate dNdS selection p-values using a more aggressive gamma-poisson model
"""
PVAL_SYN, PVAL_MIS, PVAL_NONS, PVAL_NONSYN = [], [], [], []
for i, row in df_model.iterrows():
p_syn, p_mis, p_nons, p_nonsyn = _llr_test_gamma_poiss(row)
PVAL_SYN.append(p_syn)
PVAL_MIS.append(p_mis)
PVAL_NONS.append(p_nons)
PVAL_NONSYN.append(p_nonsyn)
df_model['PVAL_SYN_SEL_PG'] = PVAL_SYN
df_model['PVAL_MIS_SEL_PG'] = PVAL_MIS
df_model['PVAL_NONS_SEL_PG'] = PVAL_NONS
df_model['PVAL_NONSYN_SEL_PG'] = PVAL_NONSYN
return df_model
def annotate_known_genes(df, key='GENE'):
""" Annotate known driver genes based on existing databases
"""
## TODO: Remove hard-coded paths
## 1. Load databases
df_cgc = pd.read_excel('/data/cb/maxas/data/projects/cancer_mutations/DRIVER_DBs/COSMIC_CGC_allMon_Oct_12_18_34_22_2020.xlsx')
    df_oncokb = pd.read_table('/data/cb/maxas/data/projects/cancer_mutations/DRIVER_DBs/OncoKB_cancerGeneList.txt')
import pytest
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
from time_series_experiments.pipeline import Pipeline, ColumnsProcessor, Step
from time_series_experiments.pipeline.dataset import DatasetConfig, VarType
from time_series_experiments.pipeline.validation import BacktestingCrossVal
from time_series_experiments.pipeline.tasks import (
DateFeatures,
Wrap,
OrdCat,
TargetLag,
)
from time_series_experiments.experiment.runner import TrainingParams, ExperimentRunner
from ..conftest import generate_target
@pytest.fixture
def dataset():
nrows = 5000
start = "2018-01-01 00:00:00"
freq = "1H"
dates = pd.date_range(start=start, freq=freq, periods=nrows)
y = generate_target(nrows, freq=freq, start=start)
    return pd.DataFrame({"target": y, "date": dates})
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import unittest
import numpy as np
import pandas as pd
from nimbusml import Pipeline, FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.timeseries import SsaForecaster
class TestSsaForecaster(unittest.TestCase):
def test_simple_forecast(self):
seasonality_size = 5
seasonal_data = np.arange(seasonality_size)
data = np.tile(seasonal_data, 3)
        X_train = pd.Series(data, name="ts")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 23:16:15 2020
@author: Eli
"""
from sklearn.model_selection import cross_validate
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import cross_val_predict
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import confusion_matrix
from sklearn import svm
from joblib import dump
import os
import pandas as pd
import numpy as np
class Classifier:
"""
Simple class for keeping track of data and running
crossvalidation on sklearn classifiers
"""
def __init__(self, model):
self.data = []
self.set_model(model)
def set_model(self, model):
"""
Sets the model for classification
Parameters
----------
model : sklearn classifier
Returns
-------
None.
"""
self.model = model
def load_data(self, data, cols = None):
"""
Loads data and appends it to the internal dataset
Parameters
----------
data : pd dataframe or path to load one
feature matrix, where the column 'label' has the class
cols : list, optional
list of columns to keep. If none is given then keeps all
Returns
-------
None.
"""
if isinstance(data, str):
data = pd.read_csv(data)
data_to_append = data.copy()
#get column subset if needed
if cols is not None:
cols_ = cols[:]
            if 'user' not in cols:
cols_.append('user')
if 'label' not in cols:
cols_.append('label')
if 'dataset' not in cols:
cols_.append('dataset')
data_to_append = data_to_append[cols_]
self.data.append(data_to_append)
def crossval(self, split_col = 'user', cols = None, col_score_split=['user','label'],
n_jobs = 1):
"""
        Runs group-wise cross-validation of the model and scores the out-of-fold predictions
Parameters
----------
split_col : str , optional
column to perform the crossvalidation over
cols : list, optional
list of columns to use in the classifier. If none is given then keeps all
col_score_split: list of str
list of columns to calculate the score breakdown on
n_jobs: int
number of cores to give to sklearn. Colin set to 2, eli and kai
with tiny computers set to 1
Returns
-------
a dictionary of accuracy breakdowns by different categories
"""
#concatenate all of the data together
if len(self.data) > 1:
all_data = pd.concat(self.data, axis=0, ignore_index=True, copy=False)
elif len(self.data) == 1:
all_data = self.data[0]
else:
raise ValueError("I gots no data :'(")
#select columns
y = all_data['label'].values
groups = all_data[split_col].values
cv = GroupKFold(n_splits=len(np.unique(groups)))
if cols is None:
cols_ = [c for c in all_data.columns if c not in ['label','dataset','user']]
else:
cols_ = cols
X = all_data[cols_].to_numpy()
print("Beginning model evaluation...")
# scores = cross_validate(estimator = self.model,
# X = X, y = y, groups=groups,
# cv=cv,
# return_train_score=False,
# return_estimator=True, n_jobs=2)
preds = cross_val_predict(estimator=self.model,
X=X, y=y, groups=groups,
cv=cv, n_jobs=n_jobs)
# scores are in the order of the groups, so the first row out is the
# result of training on the other groups, and testing on the first group
#self.scores = scores
self.preds = preds
self.y_true = y
#do a score breakdown by unique value
scores = {}
for col in col_score_split:
unique_vals = np.unique(all_data[col])
accuracy = np.zeros(len(unique_vals))
for i,val in enumerate(unique_vals):
entries = all_data[col] == val
accuracy[i] = np.sum(self.preds[entries] == y[entries])/np.sum(entries)
            scores[col] = pd.DataFrame({col:unique_vals,'accuracy':accuracy})
from typing import Any, Dict, Type # NOQA
import logging
from easydict import EasyDict
from kedro.utils import load_obj
import numpy as np
import pandas as pd
import sklearn # NOQA
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
f1_score,
precision_score,
recall_score,
roc_auc_score,
)
log = logging.getLogger(__name__)
def get_cols_features(
df,
non_feature_cols=[
"Treatment",
"Outcome",
"TransformedOutcome",
"Propensity",
"Recommendation",
],
):
return [column for column in df.columns if column not in non_feature_cols]
def concat_train_test(args, train, test):
r"""
Concatenate train and test series.
Use series.xs('train') or series.xs('test') to split
"""
if test is None:
series = pd.concat(
[pd.Series(train)],
keys=["train"],
names=[args.partition_name, args.index_name],
)
else:
series = pd.concat(
[pd.Series(train), pd.Series(test)],
keys=["train", "test"],
names=[args.partition_name, args.index_name],
)
return series
def concat_train_test_df(args, train, test):
r"""
Concatenate train and test data frames.
Use df.xs('train') or df.xs('test') to split.
"""
df = pd.concat(
[train, test],
keys=["train", "test"],
names=[args.partition_name, args.index_name],
)
return df
def len_t(df, treatment=1.0, col_treatment="Treatment"):
return df.query("{}=={}".format(col_treatment, treatment)).shape[0]
def len_o(df, outcome=1.0, col_outcome="Outcome"):
return df.query("{}=={}".format(col_outcome, outcome)).shape[0]
def len_to(
df, treatment=1.0, outcome=1.0, col_treatment="Treatment", col_outcome="Outcome"
):
len_ = df.query(
"{}=={} & {}=={}".format(col_treatment, treatment, col_outcome, outcome)
).shape[0]
return len_
def treatment_fraction_(df, col_treatment="Treatment"):
return len_t(df, col_treatment=col_treatment) / len(df)
def treatment_fractions_(
args, # type: Dict[str, Any]
df, # type: Type[pd.DataFrame]
):
# type: (...) -> Type[EasyDict]
col_treatment = args.col_treatment
treatment_fractions = {
"train": treatment_fraction_(df.xs("train"), col_treatment=col_treatment),
"test": treatment_fraction_(df.xs("test"), col_treatment=col_treatment),
}
return EasyDict(treatment_fractions)
def outcome_fraction_(df, col_outcome="Outcome"):
return len_o(df, col_outcome=col_outcome) / len(df)
def overall_uplift_gain_(
df, treatment=1.0, outcome=1.0, col_treatment="Treatment", col_outcome="Outcome"
):
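# outcome rate among the treated minus outcome rate among the untreated (a simple aggregate uplift estimate)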
overall_uplift_gain = (
len_to(df, col_treatment=col_treatment, col_outcome=col_outcome)
/ len_t(df, col_treatment=col_treatment)
) - (
len_to(df, 0, 1, col_treatment=col_treatment, col_outcome=col_outcome)
/ len_t(df, 0, col_treatment=col_treatment)
)
return overall_uplift_gain
def gain_tuple(df_, r_):
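# r_ is assumed to carry cumulative-gain curve data (cgains_x, cgains_y) and
# Q/q1/q2 statistics, e.g. from an uplift evaluation result object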
treatment_fraction = treatment_fraction_(df_)
outcome_fraction = outcome_fraction_(df_)
overall_uplift_gain = overall_uplift_gain_(df_)
cgain = np.interp(treatment_fraction, r_.cgains_x, r_.cgains_y)
cgain_base = overall_uplift_gain * treatment_fraction
cgain_factor = cgain / cgain_base
return (
treatment_fraction,
outcome_fraction,
overall_uplift_gain,
cgain,
cgain_base,
cgain_factor,
r_.Q_cgains,
r_.q1_cgains,
r_.q2_cgains,
)
def score_df(y_train, y_test, y_pred_train, y_pred_test, average="binary"):
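# builds a one-row-per-partition (train/test) table of classification metrics;
# ROC-AUC and observed/predicted CVR columns are only added for binary targets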
if (
y_train is not None
and y_pred_train is not None
and len(y_train) != len(y_pred_train)
):
raise Exception("Lengths of true and predicted for train do not match.")
if (
y_test is not None
and y_pred_test is not None
and len(y_test) != len(y_pred_test)
):
raise Exception("Lengths of true and predicted for test do not match.")
score_df = pd.DataFrame()
for (partition_, y_, y_pred_) in [
("train", y_train, y_pred_train),
("test", y_test, y_pred_test),
]:
if (
y_ is not None
and y_pred_ is not None
and (0 <= y_).all()
and (y_ <= 1).all()
and (0 <= y_pred_).all()
and (y_pred_ <= 1).all()
):
num_classes = pd.Series(y_).nunique()
score_list = [
len(y_),
pd.Series(y_).nunique(),
accuracy_score(y_, y_pred_),
precision_score(y_, y_pred_, average=average),
recall_score(y_, y_pred_, average=average),
f1_score(y_, y_pred_, average=average),
] + (
[
roc_auc_score(y_, y_pred_),
pd.Series(y_).mean(),
pd.Series(y_pred_).mean(),
]
if num_classes == 2
else []
)
column_list = [
"# samples",
"# classes",
"accuracy",
"precision",
"recall",
"f1",
] + (
["roc_auc", "observed CVR", "predicted CVR"] if num_classes == 2 else []
)
score_df_ = pd.DataFrame(
[score_list], index=[partition_], columns=column_list,
)
score_df = score_df.append(score_df_)
return score_df
def conf_mat_df(y_true, y_pred):
conf_mat = confusion_matrix(y_true, y_pred)
num_class = len(conf_mat)
true_labels = ["True_{}".format(i) for i in range(num_class)]
pred_labels = ["Pred_{}".format(i) for i in range(num_class)]
conf_mat_df = pd.DataFrame(conf_mat, index=true_labels, columns=pred_labels)
return conf_mat_df
def bundle_train_and_test_data(args, train_df, test_df):
assert isinstance(train_df, pd.DataFrame)
# assert isinstance(test_df, pd.DataFrame)
# assert set(train_df.columns) == set(test_df.columns)
assert all([isinstance(col_name, str) for col_name in train_df.columns])
index_name = args.index_name
if index_name is not None:
train_df = train_df.reset_index(drop=True).copy()
train_df.index.name = index_name
if test_df is not None:
test_df = test_df.reset_index(drop=True).copy()
test_df.index.name = index_name
elif test_df is not None:
assert train_df.index.name == test_df.index.name
df = concat_train_test_df(args, train_df, test_df)
return df
def impute_cols_features(args, df):
non_feature_cols = [
args.col_treatment,
args.col_outcome,
args.col_propensity,
args.col_cate,
args.col_recommendation,
]
args.cols_features = args.cols_features or get_cols_features(
df, non_feature_cols=non_feature_cols
)
return args
def compute_cate(proba_treated, proba_untreated):
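# CATE (conditional average treatment effect) estimated per row as the difference
# in predicted outcome probability with vs. without treatment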
cate_estimated = proba_treated - proba_untreated
return cate_estimated
def add_cate_to_df(args, df, cate_estimated, proba_treated, proba_untreated):
df.loc[:, args.col_proba_if_treated] = proba_treated
df.loc[:, args.col_proba_if_untreated] = proba_untreated
df.loc[:, args.col_cate] = cate_estimated.values
return df
def recommend_by_cate(args, df, treatment_fractions):
cate_series = df[args.col_cate]
def recommendation(cate_series, treatment_fraction):
rank_series = cate_series.rank(method="first", ascending=False, pct=True)
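# pct=True gives percentile ranks, so rows in the top treatment_fraction by estimated CATE
# receive recommendation 1.0 in the step below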
r = np.where(rank_series <= treatment_fraction, 1.0, 0.0)
return r
recommendation_train = recommendation(
cate_series.xs("train"), treatment_fractions.train
)
recommendation_test = recommendation(
cate_series.xs("test"), treatment_fractions.test
)
df.loc[:, args.col_recommendation] = concat_train_test(
args, recommendation_train, recommendation_test
)
return df
def estimate_effect(args, sim_treated_df, sim_untreated_df):
estimated_effect_df = | pd.DataFrame() | pandas.DataFrame |
"""
Module to generate counterfactual explanations from a KD-Tree
This code is similar to 'Interpretable Counterfactual Explanations Guided by Prototypes': https://arxiv.org/pdf/1907.02584.pdf
"""
from dice_ml.explainer_interfaces.explainer_base import ExplainerBase
import numpy as np
import timeit
from sklearn.neighbors import KDTree
import pandas as pd
import copy
import random
from dice_ml import diverse_counterfactuals as exp
class DiceKD(ExplainerBase):
def __init__(self, data_interface, model_interface):
"""Init method
:param data_interface: an interface class to access data related params.
:param model_interface: an interface class to access trained ML model.
"""
self.total_random_inits = 0
super().__init__(data_interface) # initiating data related parameters
# initializing model variables
self.model = model_interface
# loading trained model
self.model.load_model()
# number of output nodes of ML model
self.num_output_nodes = self.model.get_num_output_nodes(len(self.data_interface.encoded_feature_names))
self.predicted_outcome_name = self.data_interface.outcome_name + '_pred'
# Partitioned dataset and KD Tree for each class (binary) of the dataset
self.dataset_with_predictions, self.dataset_with_predictions_size, self.KD_tree, self.predictions = self.build_KD_tree()
def build_KD_tree(self):
# Stores the predictions on the training data
dataset_instance = self.data_interface.prepare_query_instance(
query_instance=self.data_interface.data_df[self.data_interface.feature_names], encoding='one-hot')
dataset_dict_output = np.array([dataset_instance.values], dtype=np.float32)
predictions = self.predict_fn(dataset_dict_output[0])
# TODO: Is it okay to insert a column in the original dataframe with the predicted outcome? This is memory-efficient
self.data_interface.data_df[self.predicted_outcome_name] = predictions
# segmenting the dataset according to outcome
dataset_with_predictions = {}
dataset_with_predictions_size = {}
for i in range(2):
dataset_with_predictions[i] = self.data_interface.data_df.loc[np.round(predictions) == i].copy()
dataset_with_predictions_size[i] = len(self.data_interface.data_df.loc[np.round(predictions) == i])
# Prepares the KD trees for DiCE - 1 for each outcome (here only 0 and 1, binary classification)
return dataset_with_predictions, dataset_with_predictions_size, \
{i: KDTree(pd.get_dummies(dataset_with_predictions[i][self.data_interface.feature_names])) for i in
range(2)}, predictions
def generate_counterfactuals(self, query_instance, total_CFs, desired_class="opposite", features_to_vary="all",
permitted_range=None, training_points_only=True,
feature_weights="inverse_mad", stopping_threshold=0.5, posthoc_sparsity_param=0.1,
posthoc_sparsity_algorithm="linear", verbose=True):
"""Generates diverse counterfactual explanations
:param query_instance: A dictionary of feature names and values. Test point of interest.
:param total_CFs: Total number of counterfactuals required.
:param desired_class: Desired counterfactual class - can take 0 or 1. Default value is "opposite" to the outcome class of query_instance for binary classification.
:param features_to_vary: Either a string "all" or a list of feature names to vary.
:param permitted_range: Dictionary with continuous feature names as keys and permitted min-max range in list as values. Defaults to the range inferred from training data. If None, uses the parameters initialized in data_interface.
:param training_points_only: Parameter to determine if the returned counterfactuals should be a subset of the training data points
:param feature_weights: Either "inverse_mad" or a dictionary with feature names as keys and corresponding weights as values. Default option is "inverse_mad" where the weight for a continuous feature is the inverse of the Median Absolute Devidation (MAD) of the feature's values in the training set; the weight for a categorical feature is equal to 1 by default.
:param stopping_threshold: Minimum threshold for counterfactuals target class probability.
:param posthoc_sparsity_param: Parameter for the post-hoc operation on continuous features to enhance sparsity.
:param posthoc_sparsity_algorithm: Perform either linear or binary search. Takes "linear" or "binary". Prefer binary search when a feature range is large (for instance, income varying from 10k to 1000k) and only if the features share a monotonic relationship with predicted outcome in the model.
:param verbose: Parameter to determine whether to print 'Diverse Counterfactuals found!'
:return: A CounterfactualExamples object to store and visualize the resulting counterfactual explanations (see diverse_counterfactuals.py).
"""
# check feature MAD validity and throw warnings
if feature_weights == "inverse_mad":
self.data_interface.get_valid_mads(display_warnings=True, return_mads=False)
if features_to_vary == 'all':
features_to_vary = self.data_interface.feature_names
query_instance, test_pred, final_cfs, cfs_preds = self.find_counterfactuals(query_instance, desired_class,
total_CFs, features_to_vary,
permitted_range,
training_points_only,
stopping_threshold,
posthoc_sparsity_param,
posthoc_sparsity_algorithm, verbose)
return exp.CounterfactualExamples(self.data_interface, query_instance, test_pred, final_cfs, cfs_preds,
self.final_cfs_sparse, self.cfs_preds_sparse, posthoc_sparsity_param,
desired_class)
def predict_fn(self, input_instance):
"""prediction function"""
temp_preds = self.model.get_output(input_instance)[:, self.num_output_nodes - 1]
return temp_preds
def get_samples_eps(self, features_to_vary, eps, sample_size, cf, mads, query_instance, desired_class, cfs_needed):
"""This function generates counterfactuals in the epsilon-vicinity of a given counterfactual such that it
varies only features_to_vary """
cfs_found = []
cfs_found_preds = []
# The maximum number of counterfactuals this method will generate is sample_size
for i in range(sample_size):
temp_cf = {}
for j in range(len(self.data_interface.feature_names)):
feature = self.data_interface.feature_names[j]
if feature in features_to_vary:
if feature in self.data_interface.categorical_feature_names:
# picking a random value for the feature if it is categorical
temp_cf[feature] = random.choice(self.data_interface.data_df[feature].unique())
else:
# picking a value in the epsilon vicinity of the given counterfactual's feature value if it is continuous
minx = max(self.data_interface.permitted_range[feature][0], cf[feature] - eps * mads[feature])
maxx = min(self.data_interface.permitted_range[feature][1], cf[feature] + eps * mads[feature])
temp_cf[feature] = np.random.uniform(minx, maxx)
else:
temp_cf[feature] = query_instance[feature]
temp_cf = self.data_interface.prepare_query_instance(query_instance=temp_cf,
encoding='one-hot')
temp_cf = np.array([temp_cf.iloc[0].values])
test_pred = self.predict_fn(temp_cf)[0]
# if the instance generated is actually a counterfactual
if np.round(test_pred) == desired_class:
cfs_found.append(temp_cf)
cfs_found_preds.append(test_pred)
if len(cfs_found) == cfs_needed:
return cfs_found, cfs_found_preds
return cfs_found, cfs_found_preds
def vary_only_features_to_vary(self, desired_class, KD_query_instance, total_CFs, features_to_vary, query_instance,
training_points_only):
"""This function ensures that we only vary features_to_vary when generating counterfactuals"""
# sampling k^2 points closest points from the KD tree.
# TODO: this should be a user-specified parameter
points_from_KD_tree = total_CFs * total_CFs
num_queries = min(points_from_KD_tree, self.dataset_with_predictions_size[desired_class])
indices = self.KD_tree[desired_class].query(KD_query_instance, num_queries)[1][0]
cfs = self.dataset_with_predictions[desired_class][self.data_interface.feature_names].iloc[indices].copy()
final_cfs = []
final_indices = []
cfs_preds = []
total_cfs_found = 0
# first, iterating through the closest points from the KD tree and checking if any of these are valid
for i in range(len(cfs)):
if total_cfs_found == total_CFs:
break
valid_cf_found = True
for feature in self.data_interface.feature_names:
if feature not in features_to_vary and cfs.iloc[i][feature] != query_instance[feature]:
valid_cf_found = False
break
if valid_cf_found:
total_cfs_found += 1
final_indices.append(i)
if total_cfs_found > 0:
final_cfs_temp = cfs.iloc[final_indices].to_dict('records')
final_cfs_temp = self.data_interface.prepare_query_instance(query_instance=final_cfs_temp,
encoding='one-hot').values
final_cfs = [final_cfs_temp[i, :].reshape(1, -1) for i in range(final_cfs_temp.shape[0])]
# Finding the predicted outcome for each cf
for i in range(total_cfs_found):
cfs_preds.append(
self.dataset_with_predictions[desired_class].iloc[final_indices[i]][self.predicted_outcome_name])
if total_cfs_found >= total_CFs or training_points_only:
return final_cfs[:total_CFs], cfs_preds
print(total_cfs_found, "Counterfactuals found so far. Moving on to non-training points")
# Now, generating counterfactuals that aren't part of the training data
i = 0
j = 0
start_eps = 1
eps1 = start_eps
eps2 = start_eps
mads = self.data_interface.get_valid_mads()
max_eps = self.data_interface.max_range
# TODO: this should be a user-specified parameter
sample_size = max(50, total_CFs*4)
stop_method_1 = False
stop_method_2 = False
# This part of the code randomly samples points within a ball of epsilon around each point obtained from the KD tree.
while (not stop_method_1) or (not stop_method_2):
# Method 1 implements perturbations of all valid radii before proceeding to the next instance
if not stop_method_1:
cfs_found, cfs_found_preds = self.get_samples_eps(features_to_vary, eps1, sample_size, cfs.iloc[i],
mads, query_instance, desired_class,
total_CFs - total_cfs_found)
final_cfs.extend(cfs_found)
cfs_preds.extend(cfs_found_preds)
total_cfs_found += len(cfs_found)
# if total_CFs number of counterfactuals are already found, return
if total_cfs_found == total_CFs:
return final_cfs, cfs_preds
# double epsilon until it reaches the maximum value
eps1 *= 2
if eps1 > max_eps:
eps1 = start_eps
i += 1
# stop method 1 when you have iterated through all instances
if i == num_queries:
stop_method_1 = True
# Method 2 implements perturbations of particular radius for all instances before doubling the radius
if not stop_method_2:
cfs_found, cfs_found_preds = self.get_samples_eps(features_to_vary, eps2, sample_size, cfs.iloc[j],
mads, query_instance, desired_class,
total_CFs - total_cfs_found)
final_cfs.extend(cfs_found)
cfs_preds.extend(cfs_found_preds)
total_cfs_found += len(cfs_found)
# if total_CFs number of counterfactuals are already found, return
if total_cfs_found == total_CFs:
return final_cfs, cfs_preds
# double epsilon when all instances have been covered
if j == num_queries - 1:
j = -1
eps2 *= 2
# stop method 2 when epsilon has reached the maximum value
if eps2 > max_eps:
stop_method_2 = True
j += 1
return final_cfs, cfs_preds
def find_counterfactuals(self, query_instance, desired_class, total_CFs, features_to_vary, permitted_range,
training_points_only, stopping_threshold,
posthoc_sparsity_param, posthoc_sparsity_algorithm, verbose):
"""Finds counterfactuals by querying a K-D tree for the nearest data points in the desired class from the dataset."""
# Prepares user defined query_instance for DiCE.
query_instance_orig = query_instance
query_instance = self.data_interface.prepare_query_instance(query_instance=query_instance, encoding='one-hot')
query_instance = np.array([query_instance.iloc[0].values])
# find the predicted value of query_instance
test_pred = self.predict_fn(query_instance)[0]
if desired_class == "opposite":
desired_class = 1.0 - np.round(test_pred)
else:
desired_class = np.round(test_pred)
self.target_cf_class = np.array([[desired_class]], dtype=np.float32)
self.stopping_threshold = stopping_threshold
if self.target_cf_class == 0 and self.stopping_threshold > 0.5:
self.stopping_threshold = 0.25
elif self.target_cf_class == 1 and self.stopping_threshold < 0.5:
self.stopping_threshold = 0.75
query_instance_copy = query_instance_orig.copy()
# preparing query instance for conversion to pandas dataframe
for q in query_instance_copy:
query_instance_copy[q] = [query_instance_copy[q]]
query_instance_df = pd.DataFrame.from_dict(query_instance_copy)
start_time = timeit.default_timer()
# Making the one-hot-encoded version of query instance match the one-hot encoded version of the dataset
query_instance_df_dummies = | pd.get_dummies(query_instance_df) | pandas.get_dummies |
# -*- coding: utf-8 -*-
# Copyright 2020 <NAME>
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function, unicode_literals
import pandas as pd
import re
import defusedxml
from defusedxml.common import EntitiesForbidden
def defuse():
defusedxml.defuse_stdlib()
class ValidationException(Exception):
def readable(self, indent=0):
if self.__cause__:
if isinstance(self.__cause__, ValidationException):
yield " "*indent + str(self) + ":"
yield from self.__cause__.readable(indent=indent+1)
else:
yield " "*indent + str(self) + ": " + str(self.__cause__)
else:
yield " "*indent + str(self)
@property
def long_message(self):
return "\n".join(self.readable())
def __eq__(self, other):
return self.long_message == other.long_message
def __lt__(self, other):
return self.long_message < other.long_message
def __hash__(self):
return hash(self.long_message)
class MultiValidationException(ValidationException):
def __init__(self, multi, *args, **kwargs):
super().__init__(*args, **kwargs)
self.multi = list(set(multi))
self.multi.sort()
def readable(self, indent=0):
yield " "*indent + f"{str(self)}:"
for e in self.multi:
yield from e.readable(indent=indent+1)
class AddResults:
def __init__(self, person, result_collector, **results_to_add):
self.person = person
self.result_collector = result_collector
self.results_to_add = results_to_add
def describe(self):
print(f"For person {self.person.index}:")
for (key,value) in self.results_to_add.items():
print(f" - {key}: {value}")
def doit(self):
self.result_collector.set_results(self.person.index, **self.results_to_add)
class ResultCollector:
def __init__(self, sheet, fileupdater):
self.sheet = sheet
self.results = self.sheet
self.fileupdater = fileupdater
def get_value(self, row, key):
if key not in self.results.keys():
return None
value = self.results[key][row]
if pd.isnull(value) or value == "":
return None
return value
def set_results(self, row, **new_results):
for key in new_results.keys():
if key not in self.results.keys():
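# the column does not exist yet: create it filled with empty strings before writing the cell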
self.results = self.results.assign(**{key:""})
col = self.results[key].copy()
col[row] = new_results[key]
self.results = self.results.assign(**{key:col})
self.fileupdater.write_callback(self.results)
print(self.results)
class FileAnalysis:
def __init__(self, path):
interpretations = [ValidOr(SheetReadAnalysis, path, header_row_shift=header) for header in range(4)]
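# try reading with the header row shifted 0-3 rows down; exactly one offset is expected to yield a valid interpretation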
valid_interpretations = [interpretation for interpretation in interpretations if interpretation.res]
if len(valid_interpretations) == 0:
raise MultiValidationException([c.e for c in interpretations], "No valid interpretations as a file with a header column")
if len(valid_interpretations) > 1:
raise ValidationException("Too many valid interpretatons")
interpretation = valid_interpretations[0].res
self.sheet = interpretation.sheet
self.columns = interpretation.columns
self.interpretation = self.columns.interpretation
self.fileupdater = interpretation.fileupdater
def get_writer(self, path):
return ResultCollector(self.sheet, self.fileupdater)
def print(self):
self.interpretation.print()
class SheetReadAnalysis:
def __init__(self, path, header_row_shift=0):
sheet = read_file(path, header=header_row_shift)
try:
interpretation = Analysis(sheet)
except ValidationException as e:
raise ValidationException(f"Read with header shifted {header_row_shift} rows down failed") from e
orig_sheet = read_file(path, header=None)
self.fileupdater = SheetUpdater(path, orig_sheet, startrow=header_row_shift)
self.sheet = interpretation.sheet
self.columns = interpretation.columns
self.interpretation = self.columns.interpretation
class SheetUpdater:
def __init__(self, path, orig_sheet, **update_kwargs):
def write_callback(new_sheet):
writer = pd.ExcelWriter(path)
# Write orig_sheet exactly as inputed
orig_sheet.to_excel(writer, header=None, index=False)
# Write new_sheet at the location where we read it before.
new_sheet.to_excel(writer, index=False, **update_kwargs)
writer.save()
self.write_callback = write_callback
class Analysis:
def __init__(self, sheet):
self.sheet = sheet
self.columns = ColumnsAnalysis(sheet)
self.interpretation = self.columns.interpretation
class ColumnsAnalysis:
def __init__(self, sheet):
self.sheet = sheet
columnanalyses_or = [ValidOr(ColumnAnalysis, sheet[column]) for column in sheet.columns]
self.interpretation = InterpretationCandidates([PersonList]).find_one(columnanalyses_or)
class PersonList:
def __init__(self, columnanalyses_or):
columns = {columnanalysis_or.res.interpretation.key:columnanalysis_or.res for columnanalysis_or in columnanalyses_or if columnanalysis_or.res}
keys = {
'pnr': {'required': True },
'family_name': {'required': True},
'given_name': {'required': True},
'email': {'required': False}
}
for (key,keyinfo) in keys.items():
if keyinfo['required']:
matching_columns = [columnanalysis_or.res for columnanalysis_or in columnanalyses_or if columnanalysis_or.res and columnanalysis_or.res.interpretation.key == key]
if len(matching_columns) > 1:
raise ValidationException("multiple columns for {key}")
for (key,keyinfo) in keys.items():
if keyinfo['required'] and key not in columns:
raise MultiValidationException([columnanalysis_or.e for columnanalysis_or in columnanalyses_or if columnanalysis_or.res is None], f"not enough data, missing {key}")
if key in columns:
keys[key]['column'] = columns[key]
self.columns = keys
column_row_sizes = [len(self.columns[key]['column'].column) for key in self.columns.keys() if 'column' in self.columns[key]]
if min(column_row_sizes) != max(column_row_sizes):
raise ValidationException("mismatched columns")
return
def renamed_column(key):
if 'column' in self.columns[key]:
column = self.columns[key]['column'].column
return column
#return column.rename(key)
return None
rows = pd.concat([column for column in [renamed_column(key) for key in self.columns.keys()] if column is not None], axis=1)
for key in self.columns.keys():
if 'column' in self.columns[key]:
rows = rows.assign(**{key:self.columns[key]['column'].interpretation.found_data})
self.new_sheet = rows
valid_rows = None
max_valid_rows = 0
max_valid_column = None
for (key,keyinfo) in keys.items():
if keyinfo['required']:
column_valid_rows = set(keys[key]['column'].interpretation.valid_rows)
if len(column_valid_rows) > max_valid_rows:
max_valid_rows = len(column_valid_rows)
max_valid_column = key
if valid_rows is None:
valid_rows = column_valid_rows
else:
valid_rows = valid_rows.intersection(column_valid_rows)
if 100 * len(valid_rows) / max_valid_rows < 80:
raise ValidationException(f"Too many unmatched rows in {max_valid_column} column")
valid_rows = list(valid_rows)
valid_rows.sort()
persons = [Person(rows,i) for i in valid_rows]
self.persons = persons
self.items_type = 'persons'
self.valid_rows = valid_rows
def print(self):
print("Person information:")
def column_for_print(key):
if 'column' in self.columns[key]:
column = self.columns[key]['column'].column
return column.rename(f"{key} ({column.name})")
return None
key_names = sorted(self.columns.keys())
c = pd.concat([column for column in [column_for_print(key) for key in key_names] if column is not None], axis=1)
print(c)
#print(self.new_sheet)
for person in self.persons:
person.print()
class Person:
def __init__(self, rows, index):
self.index = index
row = rows[index:index+1]
self.pnr = row['pnr'].values[0].replace("-", "").replace(" ", "")
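# the following normalisation drops any century prefix and reduces temporary ('TF') numbers to their 6-digit date part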
if len(self.pnr) == 12:
self.pnr = self.pnr[2:]
if len(self.pnr) == 10 and self.pnr[6:8] == "TF":
self.pnr = self.pnr[0:6]
if len(self.pnr) == 8:
self.pnr = self.pnr[2:]
self.given_name = row['given_name'].values[0]
self.family_name = row['family_name'].values[0]
self.email = row['email'].values[0] if 'email' in row else None
def print(self):
print(f"row {self.index}: pnr {self.pnr}, given name {self.given_name}, family name {self.family_name}, email {self.email}")
class ColumnAnalysis:
def __init__(self, column):
self.column = column
self.interpretation = InterpretationCandidates([FamilyNameColumn, GivenNameColumn, PnrColumn, EmailColumn]).find_one(column)
class ValidOr:
def __init__(self, f, *args, **kwargs):
self.res = None
self.e = None
try:
self.res = f(*args, **kwargs)
except ValidationException as e:
self.e = e
class InterpretationCandidates:
def __init__(self, classes):
self.classes = classes
def find_one(self, *args, **kwargs):
interpretation_candidates = [ValidOr(interpretation, *args, **kwargs) for interpretation in self.classes]
valid_interpretations = [interpretation_candidate.res for interpretation_candidate in interpretation_candidates if interpretation_candidate.res]
if len(valid_interpretations) == 0:
raise MultiValidationException([c.e for c in interpretation_candidates], "No valid interpretations")
if len(valid_interpretations) > 1:
valid_interpretations.sort(key=lambda x: -len(x.valid_rows))  # most valid rows first
if len(valid_interpretations[0].valid_rows) > len(valid_interpretations[1].valid_rows):
return valid_interpretations[0]
raise ValidationException("Too many valid interpretatons")
return valid_interpretations[0]
class NameColumn:
KEY = None
NAME_RE = None
MIN_LENGTH = 2
def __init__(self, column):
try:
name = str(column.name.strip())
except Exception as e:
raise ValidationException(f"Could not parse column name '{column.name}'") from e
if not self.NAME_RE.match(name):
raise ValidationException(f"Unrecognized column name '{column.name}'")
def istext(s):
if pd.isna(s):
return False
s = str(s).strip()
return len(s) >= self.MIN_LENGTH and all([c.isalpha() or c.isspace() or c == "-" for c in s])
num_rows = len(column)
valid_rows = [i for i in column.index if istext(column[i])]
if 100 * len(valid_rows) / num_rows < 60:
raise ValidationException(f"Content of column '{column.name}' is not mostly alphabetical")
self.column = column
self.names = [str(row).strip() for row in column.convert_dtypes()]
self.found_data = self.names
self.key = self.KEY
self.valid_rows = valid_rows
class FamilyNameColumn(NameColumn):
KEY = "family_name"
NAME_RE = re.compile("((last|family).*name|efternamn)", flags=re.I)
MIN_LENGTH = 1
class GivenNameColumn(NameColumn):
KEY = "given_name"
NAME_RE = re.compile("((first|given).*name|förnamn)", flags=re.I)
class PnrColumn:
NAME_RE = re.compile("((person|p|t).*(number|nmr|nr|nummer))|(birth(day|date)|födelse(dag|datum))", flags=re.I)
def __init__(self, column):
try:
name = str(column.name.strip())
except Exception as e:
raise ValidationException(f"Could not parse column name '{column.name}'") from e
if not self.NAME_RE.match(name):
raise ValidationException(f"Unrecognized column name '{column.name}'")
pnrs = column.astype("string").str.extract(r'(((19|20)\d\d|\d\d)[01]\d[0-3]\d *((-|) *[T\d][\dF]\d\d|))')[0]
valid_rows = [i for i in pnrs.index if not pd.isna(pnrs[i])]
if 100 * len(valid_rows) / len(pnrs) < 60:
raise ValidationException("Content does not match pnr data")
self.column = column
self.pnrs = pnrs
self.found_data = self.pnrs
self.key = "pnr"
self.valid_rows = valid_rows
class EmailColumn:
NAME_RE = re.compile("(e*-*mail|e*-*post)(adress|address)", flags=re.I)
def __init__(self, column):
try:
name = str(column.name.strip())
except Exception as e:
raise ValidationException(f"Could not parse column name '{column.name}'") from e
if not self.NAME_RE.match(name):
raise ValidationException(f"Unrecognized column name '{column.name}'")
emails = column.convert_dtypes().str.extract(r'([\w\.]+@\w[\w\.]*\w\w)', flags=re.U)[0]
valid_rows = [i for i in emails.index if not pd.isna(emails[i])]
if 100 * len(valid_rows) / len(emails) < 60:
raise ValidationException("Content is not valid email addresses")
self.column = column
self.emails = emails
self.found_data = self.emails
self.key = "email"
self.valid_rows = valid_rows
def read_file(path, *args, **kwargs):
if path.endswith(".xlsx"):
return pd.read_excel(path, *args, **kwargs)
if path.endswith(".xls"):
return | pd.read_excel(path, *args, **kwargs) | pandas.read_excel |
# -*- coding: utf-8 -*-
"""System operating cost plots.
This module plots figures related to the cost of operating the power system.
Plots can be broken down by cost categories, generator types etc.
@author: <NAME>
"""
import logging
import pandas as pd
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""production_cost MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The production_cost.py module contains methods that are
related related to the cost of operating the power system.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
def prod_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Plots total system net revenue and cost normalized by the installed capacity of the area.
Total revenue is made up of reserve and energy revenues which are displayed in a stacked
bar plot with total generation cost. Net revensue is represented by a dot.
Each sceanrio is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
(True, "generator_Pool_Revenue", self.Scenarios),
(True, "generator_Reserves_Revenue", self.Scenarios),
(True, "generator_Installed_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Installed_Capacity = self["generator_Installed_Capacity"].get(scenario)
#Check if zone has installed generation, if not skips
try:
Total_Installed_Capacity = Total_Installed_Capacity.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No installed capacity in : {zone_input}")
continue
Total_Installed_Capacity = self.df_process_gen_inputs(Total_Installed_Capacity)
Total_Installed_Capacity.reset_index(drop=True, inplace=True)
Total_Installed_Capacity = Total_Installed_Capacity.iloc[0]
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = self.df_process_gen_inputs(Total_Gen_Cost)
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)*-1
# Total_Gen_Cost = Total_Gen_Cost/Total_Installed_Capacity #Change to $/MW-year
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Pool_Revenues = self["generator_Pool_Revenue"].get(scenario)
Pool_Revenues = Pool_Revenues.xs(zone_input,level=self.AGG_BY)
Pool_Revenues = self.df_process_gen_inputs(Pool_Revenues)
Pool_Revenues = Pool_Revenues.sum(axis=0)
# Pool_Revenues = Pool_Revenues/Total_Installed_Capacity #Change to $/MW-year
Pool_Revenues.rename("Energy_Revenues", inplace=True)
### Might change to Net Reserve Revenue at later date
Reserve_Revenues = self["generator_Reserves_Revenue"].get(scenario)
Reserve_Revenues = Reserve_Revenues.xs(zone_input,level=self.AGG_BY)
Reserve_Revenues = self.df_process_gen_inputs(Reserve_Revenues)
Reserve_Revenues = Reserve_Revenues.sum(axis=0)
# Reserve_Revenues = Reserve_Revenues/Total_Installed_Capacity #Change to $/MW-year
Reserve_Revenues.rename("Reserve_Revenues", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost,
Pool_Revenues, Reserve_Revenues],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost = Total_Systems_Cost.sum(axis=0)
Total_Systems_Cost = Total_Systems_Cost.rename(scenario)
total_cost_chunk.append(Total_Systems_Cost)
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=1, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out.T
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
# Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000 #Change to $/kW-year
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1e6 #Convert cost to millions
if pd.notna(custom_data_file_path):
Total_Systems_Cost_Out = self.insert_custom_data_columns(
Total_Systems_Cost_Out,
custom_data_file_path)
Net_Revenue = Total_Systems_Cost_Out.sum(axis=1)
#Checks if Net_Revenue contains data, if not skips zone and does not return a plot
if Net_Revenue.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
mplt.barplot(Total_Systems_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.plot(Net_Revenue.index, Net_Revenue.values,
color='black', linestyle='None', marker='o',
label='Net Revenue')
ax.set_ylabel('Total System Net Rev, Rev, & Cost (Million $)', color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def sys_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates a stacked bar plot of Total Generation Cost and Cost of Unserved Energy.
Plot only shows totals and is NOT broken down into technology or cost type
specific values.
Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios),
(False,f"{agg}_Cost_Unserved_Energy",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
try:
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Cost_Unserved_Energy = self[f"{agg}_Cost_Unserved_Energy"][scenario]
if Cost_Unserved_Energy.empty:
Cost_Unserved_Energy = self["generator_Total_Generation_Cost"][scenario].copy()
Cost_Unserved_Energy.iloc[:,0] = 0
Cost_Unserved_Energy = Cost_Unserved_Energy.xs(zone_input,level=self.AGG_BY)
Cost_Unserved_Energy = Cost_Unserved_Energy.sum(axis=0)
Cost_Unserved_Energy.rename("Cost_Unserved_Energy", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Cost_Unserved_Energy],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost.rename({0:scenario}, axis='index', inplace=True)
total_cost_chunk.append(Total_Systems_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not total_cost_chunk:
outputs[zone_input] = MissingZoneData()
continue
Total_Systems_Cost_Out = | pd.concat(total_cost_chunk, axis=0, sort=False) | pandas.concat |
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
expected = expected._with_freq("infer")
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect to internal
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range("2012-03-09", freq="H", periods=100, tz="utc")
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
| tm.assert_almost_equal(result, exp) | pandas._testing.assert_almost_equal |
import importlib
from hydroDL.data import gridMET
from hydroDL import kPath
import numpy as np
import pandas as pd
import os
import time
import argparse
"""
convert raw data to tabular format for each site
"""
workDir = kPath.dirWQ
dataFolder = os.path.join(kPath.dirData, 'gridMET')
maskFolder = os.path.join(kPath.dirData, 'USGS', 'gridMET', 'mask')
rawFolder = os.path.join(kPath.dirData, 'USGS', 'gridMET', 'raw')
saveFolder = os.path.join(kPath.dirData, 'USGS', 'gridMET', 'output')
# create mask for all USGS basins: gridMetMask.py
# extract gridMet data for all USGS basins: gridMetExtract.py
# setup information
varLst = ['pr', 'sph', 'srad', 'tmmn', 'tmmx', 'pet', 'etr']
yrLst = range(1979, 2020)
nSite = 7111
iSLst = list(range(0, nSite, 2000))
iELst = iSLst[1:]+[nSite]
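# the (iS, iE) pairs split the 7111 sites into blocks of at most 2000, matching the per-block raw csv files read below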
for iS, iE in zip(iSLst, iELst):
errLst = list()
dataDict = dict()
for var in varLst:
tempLst = list()
for yr in yrLst:
print('reading {} year {} {}-{}'.format(var, yr, iS, iE))
fileName = os.path.join(
rawFolder, '{}_{}_{}_{}.csv'.format(var, yr, iS, iE))
temp = | pd.read_csv(fileName, index_col=0) | pandas.read_csv |
"""Tests for model_selection.py."""
import numpy as np
import pandas as pd
import pytest
from fclearn.model_selection import create_rolling_forward_indices, train_test_split
groupby = ["SKUID", "ForecastGroupID"]
class TestTrainTestSplit:
"""Test train_test_split()."""
def test_one(self, demand_df):
"""Whether splits on date."""
X = pd.DataFrame(
data=[["2017-01-02", 1], ["2017-01-03", 2]], columns=["Date", "value"]
)
y = pd.DataFrame(
data=[["2017-01-02", 3], ["2017-01-03", 4]], columns=["Date", "value"]
)
X["Date"] = pd.to_datetime(X["Date"])
y["Date"] = pd.to_datetime(y["Date"])
X.set_index("Date", inplace=True)
y.set_index("Date", inplace=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, "2017-01-03")
# X_train
pd.testing.assert_frame_equal(
X_train,
pd.DataFrame(
data=[1],
columns=["value"],
index=pd.Index([pd.to_datetime("2017-01-02")], name="Date"),
),
)
# X_test
pd.testing.assert_frame_equal(
X_test,
pd.DataFrame(
data=[2],
columns=["value"],
index=pd.Index([pd.to_datetime("2017-01-03")], name="Date"),
),
)
# y_train
pd.testing.assert_frame_equal(
y_train,
pd.DataFrame(
data=[3],
columns=["value"],
index=pd.Index([pd.to_datetime("2017-01-02")], name="Date"),
),
)
# y_test
pd.testing.assert_frame_equal(
y_test,
pd.DataFrame(
data=[4],
columns=["value"],
index=pd.Index([pd.to_datetime("2017-01-03")], name="Date"),
),
)
class TestRollingCV:
"""Test create_rolling_forward_indices()."""
def test_start_warning(self, demand_df):
"""Gives warning when start date is not monday."""
pytest.warns(
Warning,
create_rolling_forward_indices,
demand_df,
groupby,
"2017-01-17",
"2017-01-22",
7,
7,
7,
)
def test_end_warning(self, demand_df):
"""Gives warning when end date is not sunday."""
pytest.warns(
Warning,
create_rolling_forward_indices,
demand_df,
groupby,
"2017-01-16",
"2017-01-23",
7,
7,
7,
)
def test_one(self, demand_df):
"""Two folds are received when start and stop are 13 days apart."""
start_date = pd.to_datetime("2017-01-16")
cv = create_rolling_forward_indices(
demand_df,
groupby,
start_date,
start_date + np.timedelta64(13, "D"),
7,
7,
7,
)
assert len(cv) == 2
def test_two(self, demand_df):
"""Gap between the last item train set and first item test is 7 days."""
start_date = | pd.to_datetime("2017-01-16") | pandas.to_datetime |
import numpy as np
import pytest
from pandas import Series, Timestamp, isna
import pandas._testing as tm
class TestSeriesArgsort:
def _check_accum_op(self, name, ser, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(ser).values, func(np.array(ser)), check_dtype=check_dtype,
)
# with missing values
ts = ser.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
def test_argsort(self, datetime_series):
self._check_accum_op("argsort", datetime_series, check_dtype=False)
argsorted = datetime_series.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
# GH#2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp(f"201301{i:02d}") for i in range(1, 6)])
assert s.dtype == "datetime64[ns]"
shifted = s.shift(-1)
assert shifted.dtype == "datetime64[ns]"
assert isna(shifted[4])
result = s.argsort()
expected = Series(range(5), dtype="int64")
tm.assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(list(range(4)) + [-1], dtype="int64")
tm.assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind="mergesort")
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind="mergesort")
qexpected = np.argsort(s.values, kind="quicksort")
tm.assert_series_equal(mindexer.astype(np.intp), Series(mexpected))
tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))
msg = (
r"ndarray Expected type <class 'numpy\.ndarray'>, "
r"found <class 'pandas\.core\.series\.Series'> instead"
)
with pytest.raises(AssertionError, match=msg):
| tm.assert_numpy_array_equal(qindexer, mindexer) | pandas._testing.assert_numpy_array_equal |
"""
Responsible for production of data visualisations and rendering this data as inline
base64 data for various django templates to use.
"""
from datetime import datetime, timedelta
from collections import Counter, defaultdict
from typing import Iterable, Callable
import numpy as np
import pandas as pd
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from pandas.core.base import NoNewAttributesMixin
import plotnine as p9
from lazydict import LazyDictionary
from django.contrib.auth import get_user_model
from app.models import (
Timeframe,
timing,
user_purchases,
all_available_dates,
)
from app.data import (
make_portfolio_dataframe,
cache_plot,
make_portfolio_performance_dataframe,
price_change_bins,
calc_ma_crossover_points,
)
from plotnine.layer import Layers
def cached_portfolio_performance(user):
assert isinstance(user, get_user_model())
username = user.username
overall_key = f"{username}-portfolio-performance"
stock_key = f"{username}-stock-performance"
contributors_key = f"{username}-contributor-performance"
def data_factory(
ld: LazyDictionary,
): # don't create the dataframe unless we have to - avoid expensive call!
purchase_buy_dates = []
purchases = []
stocks = []
for stock, purchases_for_stock in user_purchases(user).items():
stocks.append(stock)
for purchase in purchases_for_stock:
purchase_buy_dates.append(purchase.buy_date)
purchases.append(purchase)
purchase_buy_dates = sorted(purchase_buy_dates)
# print("earliest {} latest {}".format(purchase_buy_dates[0], purchase_buy_dates[-1]))
timeframe = Timeframe(
from_date=str(purchase_buy_dates[0]), to_date=all_available_dates()[-1]
)
return make_portfolio_performance_dataframe(stocks, timeframe, purchases)
ld = LazyDictionary()
ld["df"] = lambda ld: data_factory(ld)
return (
cache_plot(overall_key, plot_overall_portfolio, datasets=ld),
cache_plot(stock_key, plot_portfolio_stock_performance, datasets=ld),
cache_plot(contributors_key, plot_portfolio_contributors, datasets=ld),
)
def user_theme(
plot: p9.ggplot,
x_axis_label: str = "",
y_axis_label: str = "",
title: str = "",
**plot_theme,
) -> p9.ggplot:
"""Render the specified plot in the current theme with common attributes to all plots eg. legend_position etc. The themed plot is
returned. Saves code in each plot by handled all the standard stuff here."""
theme_args = { # TODO FIXME... make defaults chosen from user profile
"axis_text_x": p9.element_text(size=7),
"axis_text_y": p9.element_text(size=7),
"figure_size": (12, 6),
"legend_position": "none",
}
theme_args.update(**plot_theme)
# remove asxtrade kwargs
want_cmap_d = theme_args.pop("asxtrade_want_cmap_d", True)
want_fill_d = theme_args.pop(
"asxtrade_want_fill_d", False
) # most graphs don't fill, so False by default
want_fill_continuous = theme_args.pop("asxtrade_want_fill_continuous", False)
plot = (
plot
+ p9.theme_bw() # TODO FIXME... make chosen theme from user profile
+ p9.labs(x=x_axis_label, y=y_axis_label, title=title)
+ p9.theme(**theme_args)
)
if want_cmap_d:
plot += p9.scale_colour_cmap_d()
if want_fill_d:
plot += p9.scale_fill_cmap_d()
elif want_fill_continuous:
plot += p9.scale_fill_cmap()
return plot
def make_sentiment_plot(sentiment_df, exclude_zero_bin=True, plot_text_labels=True):
rows = []
print(
"Sentiment plot: exclude zero bins? {} show text? {}".format(
exclude_zero_bin, plot_text_labels
)
)
for column in filter(lambda c: c.startswith("bin_"), sentiment_df.columns):
c = Counter(sentiment_df[column])
date = column[4:]
for bin_name, val in c.items():
if exclude_zero_bin and (
bin_name == "0.0" or not isinstance(bin_name, str)
):
continue
bin_name = str(bin_name)
assert isinstance(bin_name, str)
val = int(val)
rows.append(
{
"date": datetime.strptime(date, "%Y-%m-%d"),
"bin": bin_name,
"value": val,
}
)
df = pd.DataFrame.from_records(rows)
# print(df['bin'].unique())
bins, labels = price_change_bins() # pylint: disable=unused-variable
order = filter(
lambda s: s != "0.0", labels
) # don't show the no-change bin since it dominates the activity heatmap
df["bin_ordered"] = pd.Categorical(df["bin"], categories=order)
plot = p9.ggplot(df, p9.aes("date", "bin_ordered", fill="value")) + p9.geom_tile(
show_legend=False
)
if plot_text_labels:
plot = plot + p9.geom_text(p9.aes(label="value"), size=8, color="white")
return user_theme(plot, y_axis_label="Daily change (%)")
@timing
def plot_fundamentals(
df: pd.DataFrame,
stock: str,
line_size=1.5, # pylint: disable=unused-argument
columns_to_report=(
"pe",
"eps",
"annual_dividend_yield",
"volume",
"last_price",
"change_in_percent_cumulative",
"change_price",
"market_cap",
"number_of_shares",
),
) -> str:
plot_df = pd.melt(
df,
id_vars="fetch_date",
value_vars=columns_to_report,
var_name="indicator",
value_name="value",
)
plot_df = plot_df[plot_df["indicator"].isin(columns_to_report)]
plot_df["value"] = pd.to_numeric(plot_df["value"])
plot_df = plot_df.dropna(axis=0, subset=["value"], how="any")
n = len(columns_to_report)
plot = (
p9.ggplot(
plot_df,
p9.aes("fetch_date", "value", colour="indicator"),
)
+ p9.geom_path(show_legend=False, size=line_size)
+ p9.facet_wrap("~ indicator", nrow=n, ncol=1, scales="free_y")
)
return user_theme(plot, figure_size=(12, n))
def plot_overall_portfolio(
ld: LazyDictionary,
figure_size=(12, 4),
line_size=1.5,
date_text_size=7,
) -> p9.ggplot:
"""
Given a daily snapshot of virtual purchases plot both overall and per-stock
performance. Return a ggplot instance representing the visualisation
"""
portfolio_df = ld["df"]
df = portfolio_df.filter(
items=["portfolio_cost", "portfolio_worth", "portfolio_profit", "date"]
)
df = df.melt(id_vars=["date"], var_name="field")
plot = (
p9.ggplot(df, p9.aes("date", "value", group="field", color="field"))
+ p9.geom_line(size=line_size)
+ p9.facet_wrap("~ field", nrow=3, ncol=1, scales="free_y")
)
return user_theme(
plot,
y_axis_label="$ AUD",
axis_text_x=p9.element_text(angle=30, size=date_text_size),
)
def plot_portfolio_contributors(ld: LazyDictionary, figure_size=(11, 5)) -> p9.ggplot:
df = ld["df"]
melted_df = make_portfolio_dataframe(df, melt=True)
all_dates = sorted(melted_df["date"].unique())
df = melted_df[melted_df["date"] == all_dates[-1]]
# print(df)
df = df[df["field"] == "stock_profit"] # only latest profit is plotted
df["contribution"] = [
"positive" if profit >= 0.0 else "negative" for profit in df["value"]
]
# 2. plot contributors ie. winners and losers
plot = (
p9.ggplot(df, p9.aes("stock", "value", group="stock", fill="stock"))
+ p9.geom_bar(stat="identity")
+ p9.facet_grid("contribution ~ field", scales="free_y")
)
return user_theme(
plot, y_axis_label="$ AUD", figure_size=figure_size, asxtrade_want_fill_d=True
)
def plot_portfolio_stock_performance(
ld: LazyDictionary, figure_width: int = 12, date_text_size=7
) -> p9.ggplot:
df = ld["df"]
df = df[df["stock_cost"] > 0.0]
# latest_date = df.iloc[-1, 6]
# latest_profit = df[df["date"] == latest_date]
# print(df)
pivoted_df = df.pivot(index="stock", columns="date", values="stock_profit")
latest_date = pivoted_df.columns[-1]
# print(latest_date)
mean_profit = pivoted_df.mean(axis=1)
n_stocks = len(mean_profit)
# if we want ~4 stocks per facet plot, then we need to specify the appropriate calculation for pd.qcut()
bins = pd.qcut(mean_profit, int(100 / n_stocks) + 1)
# print(bins)
df = df.merge(bins.to_frame(name="bins"), left_on="stock", right_index=True)
# print(df)
textual_df = df[df["date"] == latest_date]
# print(textual_df)
# melted_df = make_portfolio_dataframe(df, melt=True)
plot = (
p9.ggplot(df, p9.aes("date", "stock_profit", group="stock", colour="stock"))
+ p9.geom_smooth(size=1.0, span=0.3, se=False)
+ p9.facet_wrap("~bins", ncol=1, nrow=len(bins), scales="free_y")
+ p9.geom_text(
p9.aes(x="date", y="stock_profit", label="stock"),
color="black",
size=9,
data=textual_df,
position=p9.position_jitter(width=10, height=10),
)
)
return user_theme(
plot,
y_axis_label="$ AUD",
figure_size=(figure_width, int(len(bins) * 1.2)),
axis_text_x=p9.element_text(angle=30, size=date_text_size),
)
def plot_company_rank(ld: LazyDictionary) -> p9.ggplot:
df = ld["rank"]
# assert 'sector' in df.columns
n_bin = len(df["bin"].unique())
# print(df)
plot = (
p9.ggplot(df, p9.aes("date", "rank", group="asx_code", color="asx_code"))
+ p9.geom_smooth(span=0.3, se=False)
+ p9.geom_text(
p9.aes(label="asx_code", x="x", y="y"),
nudge_x=1.2,
size=6,
show_legend=False,
)
+ p9.facet_wrap("~bin", nrow=n_bin, ncol=1, scales="free_y")
)
return user_theme(
plot,
figure_size=(12, 20),
subplots_adjust={"right": 0.8},
)
def plot_company_versus_sector(
df: pd.DataFrame, stock: str, sector: str # pylint: disable=unused-argument
) -> p9.ggplot:
if df is None or len(df) < 1:
print("No data for stock vs. sector plot... ignored")
return None
df["date"] = pd.to_datetime(df["date"])
# print(df)
plot = p9.ggplot(
df, p9.aes("date", "value", group="group", color="group", fill="group")
) + p9.geom_line(size=1.5)
return user_theme(
plot,
y_axis_label="Change since start (%)",
subplots_adjust={"right": 0.8},
legend_position="right",
)
def plot_market_wide_sector_performance(ld: LazyDictionary) -> p9.ggplot:
"""
Display specified dates for average sector performance. Each company is assumed to start at zero
at the start of the observation period. A plot as base64 data is returned.
"""
all_stocks_cip = ld["sector_cumsum_df"]
n_stocks = len(all_stocks_cip)
# merge in sector information for each company
code_and_sector = ld["stocks_by_sector"]
n_unique_sectors = len(code_and_sector["sector_name"].unique())
print("Found {} unique sectors".format(n_unique_sectors))
# print(df)
# print(code_and_sector)
df = all_stocks_cip.merge(code_and_sector, left_index=True, right_on="asx_code")
print(
"Found {} stocks, {} sectors and merged total: {}".format(
n_stocks, len(code_and_sector), len(df)
)
)
# print(df)
grouped_df = df.groupby("sector_name").mean()
# print(grouped_df)
# ready the dataframe for plotting
grouped_df = pd.melt(
grouped_df,
ignore_index=False,
var_name="date",
value_name="cumulative_change_percent",
)
grouped_df["sector"] = grouped_df.index
grouped_df["date"] = pd.to_datetime(grouped_df["date"])
n_col = 3
plot = (
p9.ggplot(
grouped_df, p9.aes("date", "cumulative_change_percent", color="sector")
)
+ p9.geom_line(size=1.5)
+ p9.facet_wrap(
"~sector", nrow=n_unique_sectors // n_col + 1, ncol=n_col, scales="free_y"
)
)
return user_theme(
plot,
y_axis_label="Average sector change (%)",
panel_spacing=0.3,
axis_text_x=p9.element_text(angle=30, size=7),
)
def plot_series(
df,
x=None,
y=None,
tick_text_size=6,
line_size=1.5,
y_axis_label="Point score",
x_axis_label="",
color="stock",
use_smooth_line=False,
):
if df is None or len(df) < 1:
return None
assert len(x) > 0 and len(y) > 0
assert line_size > 0.0
assert isinstance(tick_text_size, int) and tick_text_size > 0
assert y_axis_label is not None
assert x_axis_label is not None
args = {"x": x, "y": y}
if color:
args["color"] = color
plot = p9.ggplot(df, p9.aes(**args))
if use_smooth_line:
plot += p9.geom_smooth(
size=line_size, span=0.3, se=False
) # plotnine doesn't support confidence intervals with Loess smoothing, so se=False
else:
plot += p9.geom_line(size=line_size)
return user_theme(
plot,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
axis_text_x=p9.element_text(angle=30, size=tick_text_size),
axis_text_y=p9.element_text(size=tick_text_size),
)
def plot_market_cap_distribution(ld: LazyDictionary) -> p9.ggplot:
df = ld["market_cap_df"]
assert set(df.columns).intersection(set(["market", "market_cap", "bin"])) == set(
["market", "market_cap", "bin"]
)
pos_market_cap_only = df[df["market_cap"] > 0.0]
plot = (
p9.ggplot(pos_market_cap_only)
+ p9.geom_boxplot(p9.aes(x="market", y="market_cap"))
+ p9.facet_wrap("bin", scales="free_y")
+ p9.scales.scale_y_log10()
)
return user_theme(
plot,
y_axis_label="Market cap. ($AUD Millions)",
subplots_adjust={"wspace": 0.30},
)
def plot_breakdown(ld: LazyDictionary) -> p9.ggplot:
"""Stacked bar plot of increasing and decreasing stocks per sector in the specified df"""
cip_df = ld["cip_df"]
cols_to_drop = [colname for colname in cip_df.columns if colname.startswith("bin_")]
df = cip_df.drop(columns=cols_to_drop)
df = pd.DataFrame(df.sum(axis="columns"), columns=["sum"])
ss = ld["stocks_by_sector"]
# ss should be:
# asx_code sector_name
# asx_code
# 14D 14D Industrials
# 1AD 1AD Health Care
# 1AG 1AG Industrials
# 1AL 1AL Consumer Discretionary........
# print(ss)
df = df.merge(ss, left_index=True, right_index=True)
if len(df) == 0: # no stock in cip_df have a sector? ie. ETF?
return None
assert set(df.columns) == set(["sum", "asx_code", "sector_name"])
df["increasing"] = df.apply(
lambda row: "up" if row["sum"] >= 0.0 else "down", axis=1
)
sector_names = (
df["sector_name"].value_counts().index.tolist()
) # sort bars by value count (ascending)
sector_names_cat = pd.Categorical(df["sector_name"], categories=sector_names)
df = df.assign(sector_name_cat=sector_names_cat)
# print(df)
plot = (
p9.ggplot(df, p9.aes(x="factor(sector_name_cat)", fill="factor(increasing)"))
+ p9.geom_bar()
+ p9.coord_flip()
)
return user_theme(
plot,
x_axis_label="Sector",
y_axis_label="Number of stocks",
subplots_adjust={"left": 0.2, "right": 0.85},
legend_title=p9.element_blank(),
asxtrade_want_fill_d=True,
)
def plot_heatmap(
timeframe: Timeframe, ld: LazyDictionary, bin_cb=price_change_bins
) -> p9.ggplot:
"""
Plot the specified data matrix as binned values (heatmap) with X axis being dates over the specified timeframe and Y axis being
the percentage change on the specified date (other metrics may also be used, but you will likely need to adjust the bins)
:rtype: p9.ggplot instance representing the heatmap
"""
df = ld["cip_df"]
bins, labels = bin_cb()
# print(df.columns)
# print(bins)
try:
# NB: this may fail if no prices are available so we catch that error and handle accordingly...
for date in df.columns:
df["bin_{}".format(date)] = pd.cut(df[date], bins, labels=labels)
sentiment_plot = make_sentiment_plot(
df, plot_text_labels=timeframe.n_days <= 30
) # show counts per bin only if there are not too many bins
return sentiment_plot
except KeyError:
return None
def plot_sector_performance(dataframe: pd.DataFrame, descriptor: str):
assert len(dataframe) > 0
dataframe["date"] = pd.to_datetime(dataframe["date"], format="%Y-%m-%d")
# now do the plot
labels = [
"Number of stocks up >5%",
"Number of stocks down >5%",
"Remaining stocks",
]
# print(dataframe)
dataframe.columns = labels + ["date"]
melted_df = dataframe.melt(value_vars=labels, id_vars="date")
plot = (
p9.ggplot(
melted_df,
p9.aes("date", "value", colour="variable", group="factor(variable)"),
)
+ p9.facet_wrap("~variable", ncol=1, scales="free_y")
+ p9.geom_line(size=1.3)
)
return user_theme(plot)
def auto_dates():
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
formatter.formats = [
"%y", # ticks are mostly years
"%b", # ticks are mostly months
"%d", # ticks are mostly days
"%H:%M", # hrs
"%H:%M", # min
"%S.%f",
] # secs
# these are mostly just the level above...
formatter.zero_formats = [""] + formatter.formats[:-1]
# ...except for ticks that are mostly hours, then it is nice to have
# month-day:
formatter.zero_formats[3] = "%d-%b"
formatter.offset_formats = [
"",
"%Y",
"%b %Y",
"%d %b %Y",
"%d %b %Y",
"%d %b %Y %H:%M",
]
return (locator, formatter)
def relative_strength(prices, n=14):
# see https://stackoverflow.com/questions/20526414/relative-strength-index-in-python-pandas
assert n > 0
assert prices is not None
# Get the difference in price from previous step
delta = prices.diff()
# Get rid of the first row, which is NaN since it did not have a previous
# row to calculate the differences
delta = delta[1:]
# Make the positive gains (up) and negative gains (down) Series
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
# Calculate the EWMA
roll_up1 = up.ewm(span=n).mean()
roll_down1 = down.abs().ewm(span=n).mean()
# Calculate the RSI based on EWMA
rs = roll_up1 / roll_down1
rsi = 100.0 - (100.0 / (1.0 + rs))
# NB: format is carefully handled here, so downstream code doesn't break
new_date = datetime.strftime(
datetime.now(), "%Y-%m-%d "
) # make sure it is not an existing date
# print(new_date)
rsi.at[new_date] = np.nan # ensure data series are the same length for matplotlib
# print(len(rsi), " ", len(prices))
# assert len(rsi) == len(prices)
return rsi
@timing
def plot_momentum(stock: str, timeframe: Timeframe, ld: LazyDictionary) -> plt.Figure:
assert len(stock) > 0
assert "stock_df" in ld or "stock_df_200" in ld
start_date = timeframe.earliest_date
stock_df = ld["stock_df_200"] if "stock_df_200" in ld else ld["stock_df"]
last_price = stock_df["last_price"]
volume = stock_df["volume"]
day_low_price = stock_df["day_low_price"]
day_high_price = stock_df["day_high_price"]
# print(last_price)
# print(volume)
# print(day_low_price)
# print(day_high_price)
plt.rc("axes", grid=True)
plt.rc("grid", color="0.75", linestyle="-", linewidth=0.5)
textsize = 8
left, width = 0.1, 0.8
rect1 = [left, 0.7, width, 0.2]
rect2 = [left, 0.3, width, 0.4]
rect3 = [left, 0.1, width, 0.2]
fig = plt.figure(facecolor="white", figsize=(12, 6))
axescolor = "#f6f6f6" # the axes background color
ax1 = fig.add_axes(rect1, facecolor=axescolor) # left, bottom, width, height
ax2 = fig.add_axes(rect2, facecolor=axescolor, sharex=ax1)
ax2t = ax2.twinx()
ax3 = fig.add_axes(rect3, facecolor=axescolor, sharex=ax1)
fig.autofmt_xdate()
# plot the relative strength indicator
rsi = relative_strength(last_price)
# print(len(rsi))
fillcolor = "darkgoldenrod"
timeline = pd.to_datetime(last_price.index, format="%Y-%m-%d")
ax1.plot(timeline, rsi, color=fillcolor)
ax1.axhline(70, color="darkgreen")
ax1.axhline(30, color="darkgreen")
ax1.fill_between(
timeline, rsi, 70, where=(rsi >= 70), facecolor=fillcolor, edgecolor=fillcolor
)
ax1.fill_between(
timeline, rsi, 30, where=(rsi <= 30), facecolor=fillcolor, edgecolor=fillcolor
)
ax1.text(
0.6,
0.9,
">70 = overbought",
va="top",
transform=ax1.transAxes,
fontsize=textsize,
)
ax1.text(0.6, 0.1, "<30 = oversold", transform=ax1.transAxes, fontsize=textsize)
ax1.set_ylim(0, 100)
ax1.set_yticks([30, 70])
ax1.text(
0.025, 0.95, "RSI (14)", va="top", transform=ax1.transAxes, fontsize=textsize
)
# ax1.set_title('{} daily'.format(stock))
# plot the price and volume data
dx = 0.0
low = day_low_price + dx
high = day_high_price + dx
deltas = np.zeros_like(last_price)
deltas[1:] = np.diff(last_price)
up = deltas > 0
ax2.vlines(timeline[up], low[up], high[up], color="black", label="_nolegend_")
ax2.vlines(timeline[~up], low[~up], high[~up], color="black", label="_nolegend_")
ma20 = last_price.rolling(window=20).mean()
ma200 = last_price.rolling(window=200, min_periods=50).mean()
# timeline = timeline.to_list()
(linema20,) = ax2.plot(timeline, ma20, color="blue", lw=2, label="MA (20)")
(linema200,) = ax2.plot(timeline, ma200, color="red", lw=2, label="MA (200)")
assert linema20 is not None
assert linema200 is not None
props = font_manager.FontProperties(size=10)
leg = ax2.legend(loc="lower left", shadow=True, fancybox=True, prop=props)
leg.get_frame().set_alpha(0.5)
volume = (last_price * volume) / 1e6 # dollar volume in millions
# print(volume)
vmax = np.nanmax(volume)
# print(vmax)
poly = ax2t.fill_between(
timeline,
volume.to_list(),
0,
alpha=0.5,
label="Volume",
facecolor=fillcolor,
edgecolor=fillcolor,
)
assert poly is not None # avoid unused variable from pylint
ax2t.set_ylim(0, 5 * vmax)
ax2t.set_yticks([])
# compute the MACD indicator
fillcolor = "darkslategrey"
n_fast = 12
n_slow = 26
n_ema = 9
emafast = last_price.ewm(span=n_fast, adjust=False).mean()
emaslow = last_price.ewm(span=n_slow, adjust=False).mean()
macd = emafast - emaslow
nema = macd.ewm(span=n_ema, adjust=False).mean()
ax3.plot(timeline, macd, color="black", lw=2)
ax3.plot(timeline, nema, color="blue", lw=1)
ax3.fill_between(
timeline, macd - nema, 0, alpha=0.3, facecolor=fillcolor, edgecolor=fillcolor
)
ax3.text(
0.025,
0.95,
"MACD ({}, {}, {})".format(n_fast, n_slow, n_ema),
va="top",
transform=ax3.transAxes,
fontsize=textsize,
)
ax3.set_yticks([])
locator, formatter = auto_dates()
for ax in ax1, ax2, ax2t, ax3:
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
plt.xticks(fontsize=8)
try:
plt.xlim(left=datetime.strptime(start_date, "%Y-%m-%d"))
except IndexError:
print("WARNING: unable to set plot start_date - things may look weird")
plt.plot()
fig = plt.gcf()
plt.close(fig)
return fig
@timing
def plot_trend(sample_period="M", ld: LazyDictionary = None) -> str:
"""
Given a dataframe of a single stock from company_prices() this plots the highest price
in each month over the time period of the dataframe.
"""
assert "stock_df" in ld
def inner_date_fmt(dates_to_format):
results = []
for d in dates_to_format:
d -= timedelta(
weeks=4
) # breaks are set to the end of the month rather than the start... so
results.append(d.strftime("%Y-%m"))
return results
stock_df = ld["stock_df"]
# print(stock_df)
dataframe = stock_df.filter(items=["last_price"])
dataframe.index = pd.to_datetime(dataframe.index, format="%Y-%m-%d")
dataframe = dataframe.resample(sample_period).max()
# print(dataframe)
plot = (
p9.ggplot(
dataframe,
p9.aes(
x="dataframe.index", y=dataframe.columns[0], fill=dataframe.columns[0]
),
)
+ p9.geom_bar(stat="identity", alpha=0.7)
+ p9.scale_x_datetime(
labels=inner_date_fmt
) # don't print day (always 1st day of month due to resampling)
)
return user_theme(plot, y_axis_label="$ AUD", asxtrade_want_fill_continuous=True)
def plot_points_by_rule(net_points_by_rule: defaultdict) -> p9.ggplot:
if net_points_by_rule is None or len(net_points_by_rule) < 1:
return None
rows = []
for k, v in net_points_by_rule.items():
rows.append({"rule": str(k), "net_points": v})
df = pd.DataFrame.from_records(rows)
plot = (
p9.ggplot(df, p9.aes(x="rule", y="net_points", fill="net_points"))
+ p9.geom_bar(stat="identity", alpha=0.7)
+ p9.coord_flip()
)
return user_theme(
plot,
x_axis_label="Rule",
y_axis_label="Contributions to points by rule",
subplots_adjust={"left": 0.2},
asxtrade_want_fill_continuous=True,
)
def plot_boxplot_series(df, normalisation_method=None):
"""
Treating each column as a separate boxplot and each row as an independent observation
(ie. a different company),
render a series of box plots to identify a shift in performance across the observations.
normalisation_method should be one of the values present in
SectorSentimentSearchForm.normalisation_choices
"""
# and plot the normalised data
if normalisation_method is None or normalisation_method == "1":
normalized_df = df
y_label = "Percentage change"
elif normalisation_method == "2":
normalized_df = (df - df.min()) / (df.max() - df.min())
y_label = "Percentage change (min/max. scaled)"
else:
normalized_df = df / df.max(axis=0) # div by max if all else fails...
y_label = "Percentage change (normalised by dividing by max)"
n_inches = len(df.columns) / 5
melted = normalized_df.melt(ignore_index=False).dropna()
plot = (
p9.ggplot(melted, p9.aes(x="fetch_date", y="value"))
+ p9.geom_boxplot(outlier_colour="blue")
+ p9.coord_flip()
)
return user_theme(plot, y_axis_label=y_label, figure_size=(12, n_inches))
def plot_sector_field(df: pd.DataFrame, field, n_col=3):
# print(df.columns)
# assert set(df.columns) == set(['sector', 'date', 'mean_pe', 'sum_pe', 'sum_eps', 'mean_eps', 'n_stocks'])
n_unique_sectors = df["sector"].nunique()
df["date"] = pd.to_datetime(df["date"])
plot = (
p9.ggplot(df, p9.aes("date", field, group="sector", color="sector"))
+ p9.geom_line(size=1.0)
+ p9.facet_wrap(
"~sector", nrow=n_unique_sectors // n_col + 1, ncol=n_col, scales="free_y"
)
)
return user_theme(
plot,
y_axis_label=f"Sector-wide {field}",
panel_spacing=0.3,
axis_text_x=p9.element_text(angle=30, size=7),
)
def plot_sector_top_eps_contributors(
df: pd.DataFrame, stocks_by_sector_df: pd.DataFrame
) -> p9.ggplot:
"""
Returns a plot of the top 10 contributors per sector, based on the most recent EPS value per stock in the dataframe. If no
stocks in a given sector have positive EPS, the sector will not be plotted.
"""
most_recent_date = df.columns[-1]
last_known_eps = df[most_recent_date]
last_known_eps = last_known_eps[last_known_eps >= 0.0].to_frame()
# print(stocks_by_sector_df)
last_known_eps = last_known_eps.merge(
stocks_by_sector_df, left_index=True, right_on="asx_code"
)
last_known_eps["rank"] = last_known_eps.groupby("sector_name")[
most_recent_date
].rank("dense", ascending=False)
last_known_eps = last_known_eps[last_known_eps["rank"] <= 10.0]
n_sectors = last_known_eps["sector_name"].nunique()
last_known_eps["eps"] = last_known_eps[most_recent_date]
plot = (
p9.ggplot(
last_known_eps,
p9.aes(
y="eps",
x="reorder(asx_code,eps)", # sort bars by eps within each sub-plot
group="sector_name",
fill="sector_name",
),
)
+ p9.geom_bar(stat="identity")
+ p9.facet_wrap("~sector_name", ncol=1, nrow=n_sectors, scales="free")
+ p9.coord_flip()
)
return user_theme(
plot,
y_axis_label="EPS ($AUD)",
x_axis_label="Top 10 ASX stocks per sector as at {}".format(most_recent_date),
subplots_adjust={"hspace": 0.4},
figure_size=(12, int(n_sectors * 1.5)),
asxtrade_want_cmap_d=False,
asxtrade_want_fill_d=True,
)
def plot_monthly_returns(
timeframe: Timeframe, stock: str, ld: LazyDictionary
) -> p9.ggplot:
start = timeframe.earliest_date
end = timeframe.most_recent_date
dt = | pd.date_range(start, end, freq="BMS") | pandas.date_range |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.dataset.TimeSeriesYSpecDataset.py
#
# Copyright (C) 2015-2017 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Processes and represents data that is a function of time
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("moldynplot.dataset")
import moldynplot.dataset
from IPython import embed
import numpy as np
import pandas as pd
from ..myplotspec.YSpecDataset import YSpecDataset
################################### CLASSES ###################################
class TimeSeriesYSpecDataset(YSpecDataset):
"""
Processes and represents data that is a function of time
Attributes:
timeseries_df (DataFrame): DataFrame whose index corresponds to
time as represented by frame number or chemical time and whose
columns are a series of quantities as a function of time.
"""
help_groups = ["spec"]
@classmethod
def construct_argparser(cls, **kwargs):
"""
Adds arguments to a nascent argument parser
Arguments:
kwargs (dict): Additional keyword arguments
Returns:
ArgumentParser: Argument parser or subparser
"""
# Process arguments
parser = cls.get_argparser(grouped_help=True, **kwargs)
if parser.get_default("cls") is None:
parser.set_defaults(cls=cls)
help_groups = kwargs.get("help_groups")
arg_groups = {ag.title: ag for ag in parser._action_groups}
input_group = arg_groups.get("input",
parser.add_argument_group("input"))
action_group = arg_groups.get("action",
parser.add_argument_group("action"))
output_group = arg_groups.get("output",
parser.add_argument_group("output"))
# Input arguments
cls.add_argument(input_group, "-spec", dest="source_spec",
metavar="SPEC", type=str, help="""file from which to load
specification; see '--help
spec' for more information""")
# Action arguments
cls.add_argument(action_group, "-dt", type=float,
help="time between frames")
cls.add_argument(action_group, "-toffset", type=float,
help="offset to add to index (time or frame number)")
cls.add_argument(action_group, "-downsample", type=int,
help="factor by which to downsample data")
cls.add_argument(action_group, "--pdist", action="store_true",
dest="calc_pdist",
help="calculate probability distribution over timeseries")
cls.add_argument(action_group, "--mean", action="store_true",
dest="calc_mean",
help="calculate mean and standard error over timeseries")
# Arguments inherited from superclass
super(TimeSeriesYSpecDataset, cls).construct_argparser(
parser=parser, **kwargs)
return parser
def __init__(self, dt=None, toffset=None, downsample=None,
calc_pdist=False, calc_mean=False, outfile=None, interactive=False,
**kwargs):
"""
Arguments:
infile{s} (list): Path(s) to input file(s); may contain
environment variables and wildcards
dt (float): Time interval between points; units unspecified
toffset (float): Time offset to be added to all points (i.e.
time of first point)
downsample (int): Interval by which to downsample points
downsample_mode (str): Method of downsampling; may be 'mean'
or 'mode'
calc_pdist (bool): Calculate probability distribution using
:method:`calc_pdist`; store in instance variable `pdist_df`
pdist_kw (dict): Keyword arguments used to configure
probability distribution calculation
calc_mean (bool): Calculate mean and standard error using
:method:`calc_mean`; store in instance variable `sequence_df`
interactive (bool): Provide IPython prompt after reading and
processing data
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
.. todo:
- Calculate pdist using histogram
- Verbose pdist
"""
# Process arguments
verbose = kwargs.get("verbose", 1)
self.dataset_cache = kwargs.get("dataset_cache", None)
print(dt)
# Read data
self.timeseries_df = self.read(**kwargs)
# Process data
if dt:
self.timeseries_df.set_index(
self.timeseries_df.index.values * float(dt), inplace=True)
self.timeseries_df.index.name = "time"
if toffset:
index_name = self.timeseries_df.index.name
self.timeseries_df.set_index(
self.timeseries_df.index.values + float(toffset), inplace=True)
self.timeseries_df.index.name = index_name
if downsample:
self.timeseries_df = self.downsample(df=self.timeseries_df,
downsample=downsample, **kwargs)
# Output data
if verbose >= 2:
print("Processed timeseries DataFrame:")
print(self.timeseries_df)
if outfile is not None:
self.write(df=self.timeseries_df, outfile=outfile, **kwargs)
# Calculate probability distibution
if calc_pdist:
pdist_kw = kwargs.get("pdist_kw", {})
self.pdist_df = self.calc_pdist(df=self.timeseries_df,
verbose=verbose, **pdist_kw)
if verbose >= 2:
print("Processed pdist DataFrame:")
print(self.pdist_df)
# WRITE IF STRING
# Calculate mean and standard error
if calc_mean:
block_kw = dict(min_n_blocks=2, max_cut=0.1, all_factors=False,
fit_exp=True, fit_sig=False)
block_kw.update(kwargs.get("block_kw", {}))
self.mean_df, self.block_averager = self.calc_mean(
df=self.timeseries_df, verbose=verbose, **block_kw)
if verbose >= 2:
print("Processed mean DataFrame:")
print(self.mean_df)
# WRITE IF STRING
# Interactive prompt
if interactive:
embed()
@staticmethod
def downsample(df, downsample, downsample_mode="mean", **kwargs):
"""
Downsamples time series.
Arguments:
df (DataFrame): Timeseries DataFrame to downsample
downsample (int): Interval by which to downsample points
downsample_mode (str): Method of downsampling; may be 'mean'
or 'mode'
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
"""
from scipy.stats.mstats import mode
# Process arguments
verbose = kwargs.get("verbose", 1)
# Truncate dataset
reduced = df.values[:df.shape[0] - (df.shape[0] % downsample), :]
new_shape = (
int(reduced.shape[0] / downsample), downsample, reduced.shape[1])
index = np.reshape(
df.index.values[:df.shape[0] - (df.shape[0] % downsample)],
new_shape[:-1]).mean(axis=1)
reduced = np.reshape(reduced, new_shape)
# Downsample
if downsample_mode == "mean":
if verbose >= 1:
wrapprint("downsampling by factor of {0} using mean".format(
downsample))
reduced = np.squeeze(reduced.mean(axis=1))
elif downsample_mode == "mode":
if verbose >= 1:
wrapprint("downsampling by factor of {0} using mode".format(
downsample))
reduced = np.squeeze(mode(reduced, axis=1)[0])
# Store downsampled time series
reduced = pd.DataFrame(data=reduced, index=index,
columns=df.columns.values)
reduced.index.name = "time"
df = reduced
return df
@staticmethod
def calc_mean(df, **kwargs):
"""
Calculates the mean and standard error over a timeseries.
Arguments:
df (DataFrame): Timeseries DataFrame over which to calculate mean and
standard error of each column over rows
all_factors (bool): Use all factors by which the
dataset is divisible rather than only factors of two
min_n_blocks (int): Minimum number of blocks after
transformation
max_cut (float): Maximum proportion of dataset to
omit in transformation
fit_exp (bool): Fit exponential curve
fit_sig (bool): Fit sigmoid curve
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
Returns:
DataFrame: DataFrame including mean and standard error for each
column in *timeseries_df*
"""
from ..fpblockaverager.FPBlockAverager import FPBlockAverager
# Process arguments
verbose = kwargs.get("verbose", 1)
fit_exp = kwargs.get("fit_exp", True)
fit_sig = kwargs.get("fit_sig", True)
if verbose >= 1:
wrapprint(
"""Calculating mean and standard error over timeseries""")
# Single-level columns
if df.columns.nlevels == 1:
mean_df = pd.DataFrame(data=df.mean(axis=0))[0]
block_averager = FPBlockAverager(df, **kwargs)
if fit_exp and not fit_sig:
errors = block_averager.parameters.loc[("exp", "a (se)")]
elif fit_sig and not fit_exp:
errors = block_averager.parameters.loc[("sig", "b (se)")]
elif fit_exp and fit_sig:
errors = block_averager.parameters.loc[("exp", "a (se)")]
else:
raise Exception()
errors.index = errors.index.values + " se"
mean_df = | pd.concat([mean_df, errors]) | pandas.concat |
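# A minimal sketch of the pandas.concat pattern completed above: standard-error
# values (with " se" appended to their labels) are stacked under the means so the
# result is a single flat Series. The labels used here are illustrative only.
import pandas as pd

mean_s = pd.Series({"rmsd": 1.23, "rg": 14.7})
errors = pd.Series({"rmsd": 0.05, "rg": 0.30})
errors.index = errors.index.values + " se"   # labels become "rmsd se", "rg se"
combined = pd.concat([mean_s, errors])
print(combined.index.tolist())               # ['rmsd', 'rg', 'rmsd se', 'rg se']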
import json
import pandas as pd
import requests
import logging
log = logging.getLogger(__name__)
def get_titled_players(chess_title: str) -> list:
"""
Returns a list of player names
:param chess_title: chess title abbreviation used by the chess.com API (e.g. "GM")
:return: list of player usernames
"""
url = f'https://api.chess.com/pub/titled/{chess_title}'
log.info(f"Getting names of {chess_title} players using {url}")
response = requests.get(url)
log.info(f"Response from server: {response}")
response = response.json()
player_list = []
for player in response['players']:
player_list.append(player)
return player_list
def get_player_stats(player_list: list) -> None:
"""
Iterates through a list of player names and returns game statistics
:param player_list:
:return:
"""
with open('/data/player_stats.txt', 'w') as file:
for username in player_list:
url = f'https://api.chess.com/pub/player/{username}/stats'
log.info(f"Getting stats for player {username} players using {url}")
response = requests.get(url)
log.info(f"Response from server: {response}")
response = response.json()
response['username'] = username
json.dump(response, file)
file.write('\n')
def load_player_stats():
player_stats = []
with open('/data/player_stats.txt') as f:
for line in f:
player_stats.append(json.loads(line))
# create dataframe from json data
player_stats = pd.json_normalize(player_stats)
pd.set_option('display.max_columns', None)
df = | pd.DataFrame(player_stats) | pandas.DataFrame |
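# A minimal sketch of the flattening step above: pandas.json_normalize expands
# nested dicts into dotted column names and already returns a DataFrame, so the
# extra pd.DataFrame(...) wrap in the completion is essentially a shallow re-wrap.
# The record fields below are illustrative only.
import pandas as pd

records = [
    {"username": "alice", "chess_blitz": {"last": {"rating": 2512}}},
    {"username": "bob", "chess_blitz": {"last": {"rating": 2497}}},
]
df = pd.DataFrame(pd.json_normalize(records))
print(df.columns.tolist())   # ['username', 'chess_blitz.last.rating']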
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import operator
import string
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_110
from cudf.testing._utils import (
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
)
@pytest.fixture
def pd_str_cat():
categories = list("abc")
codes = [0, 0, 1, 0, 1, 2, 0, 1, 1, 2]
return pd.Categorical.from_codes(codes, categories=categories)
def test_categorical_basic():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
cudf_cat = cudf.Index(cat)
pdsr = pd.Series(cat, index=["p", "q", "r", "s", "t"])
sr = cudf.Series(cat, index=["p", "q", "r", "s", "t"])
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
string = str(sr)
expect_str = """
p a
q a
r b
s c
t a
"""
assert all(x == y for x, y in zip(string.split(), expect_str.split()))
assert_eq(cat.codes, cudf_cat.codes.to_array())
def test_categorical_integer():
if not PANDAS_GE_110:
pytest.xfail(reason="pandas >=1.1 required")
cat = pd.Categorical(["a", "_", "_", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(
cat.codes, sr.cat.codes.astype(cat.codes.dtype).fillna(-1).to_array()
)
assert sr.null_count == 2
np.testing.assert_array_equal(
pdsr.cat.codes.values,
sr.cat.codes.astype(pdsr.cat.codes.dtype).fillna(-1).to_array(),
)
string = str(sr)
expect_str = """
0 a
1 <NA>
2 <NA>
3 c
4 a
dtype: category
Categories (3, object): ['a', 'b', 'c']
"""
assert string.split() == expect_str.split()
def test_categorical_compare_unordered():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# test equal
out = sr == sr
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr == pdsr)
# test inequality
out = sr != sr
assert not np.any(out.to_array())
assert not np.any(pdsr != pdsr)
assert not pdsr.cat.ordered
assert not sr.cat.ordered
# test using ordered operators
assert_exceptions_equal(
lfunc=operator.lt,
rfunc=operator.lt,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
)
def test_categorical_compare_ordered():
cat1 = pd.Categorical(
["a", "a", "b", "c", "a"], categories=["a", "b", "c"], ordered=True
)
pdsr1 = pd.Series(cat1)
sr1 = cudf.Series(cat1)
cat2 = pd.Categorical(
["a", "b", "a", "c", "b"], categories=["a", "b", "c"], ordered=True
)
pdsr2 = pd.Series(cat2)
sr2 = cudf.Series(cat2)
# test equal
out = sr1 == sr1
assert out.dtype == np.bool_
assert type(out[0]) == np.bool_
assert np.all(out.to_array())
assert np.all(pdsr1 == pdsr1)
# test inequality
out = sr1 != sr1
assert not np.any(out.to_array())
assert not np.any(pdsr1 != pdsr1)
assert pdsr1.cat.ordered
assert sr1.cat.ordered
# test using ordered operators
np.testing.assert_array_equal(pdsr1 < pdsr2, (sr1 < sr2).to_array())
np.testing.assert_array_equal(pdsr1 > pdsr2, (sr1 > sr2).to_array())
def test_categorical_binary_add():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([pdsr, pdsr],),
rfunc_args_and_kwargs=([sr, sr],),
expected_error_message="Series of dtype `category` cannot perform "
"the operation: add",
)
def test_categorical_unary_ceil():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_exceptions_equal(
lfunc=getattr,
rfunc=sr.ceil,
lfunc_args_and_kwargs=([pdsr, "ceil"],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: ceil",
)
def test_categorical_element_indexing():
"""
Element indexing to a cat column must give the underlying object
not the numerical index.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
assert_eq(pdsr, sr)
assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)
def test_categorical_masking():
"""
Test common operation for getting all rows that match a certain
category.
"""
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
# check scalar comparison
expect_matches = pdsr == "a"
got_matches = sr == "a"
np.testing.assert_array_equal(
expect_matches.values, got_matches.to_array()
)
# mask series
expect_masked = pdsr[expect_matches]
got_masked = sr[got_matches]
assert len(expect_masked) == len(got_masked)
assert len(expect_masked) == got_masked.valid_count
assert_eq(got_masked, expect_masked)
def test_df_cat_set_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a")
pddf = df.to_pandas(nullable_pd_dtype=False)
expect = pddf.set_index("a")
assert_eq(got, expect)
def test_df_cat_sort_index():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
got = df.set_index("a").sort_index()
expect = df.to_pandas(nullable_pd_dtype=False).set_index("a").sort_index()
assert_eq(got, expect)
def test_cat_series_binop_error():
df = cudf.DataFrame()
df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc"))
df["b"] = np.arange(len(df))
dfa = df["a"]
dfb = df["b"]
# lhs is a categorical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfa, dfb],),
rfunc_args_and_kwargs=([dfa, dfb],),
check_exception_type=False,
expected_error_message="Series of dtype `category` cannot "
"perform the operation: add",
)
# if lhs is a numerical
assert_exceptions_equal(
lfunc=operator.add,
rfunc=operator.add,
lfunc_args_and_kwargs=([dfb, dfa],),
rfunc_args_and_kwargs=([dfb, dfa],),
check_exception_type=False,
expected_error_message="'add' operator not supported",
)
@pytest.mark.parametrize("num_elements", [10, 100, 1000])
def test_categorical_unique(num_elements):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), num_elements
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_sorted = np.sort(gdf["a"].unique().to_pandas())
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique_sorted = np.sort(pdf["a"].unique())
# verify
np.testing.assert_array_equal(pdf_unique_sorted, gdf_unique_sorted)
@pytest.mark.parametrize("nelem", [20, 50, 100])
def test_categorical_unique_count(nelem):
# create categorical series
np.random.seed(12)
pd_cat = pd.Categorical(
pd.Series(
np.random.choice(
list(string.ascii_letters + string.digits), nelem
),
dtype="category",
)
)
# gdf
gdf = cudf.DataFrame()
gdf["a"] = cudf.Series.from_categorical(pd_cat)
gdf_unique_count = gdf["a"].nunique()
# pandas
pdf = pd.DataFrame()
pdf["a"] = pd_cat
pdf_unique = pdf["a"].unique()
# verify
assert gdf_unique_count == len(pdf_unique)
def test_categorical_empty():
cat = pd.Categorical([])
pdsr = pd.Series(cat)
sr = cudf.Series(cat)
np.testing.assert_array_equal(cat.codes, sr.cat.codes.to_array())
# Test attributes
assert_eq(pdsr.cat.categories, sr.cat.categories)
assert pdsr.cat.ordered == sr.cat.ordered
np.testing.assert_array_equal(
pdsr.cat.codes.values, sr.cat.codes.to_array()
)
def test_categorical_set_categories():
cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"])
psr = pd.Series(cat)
sr = cudf.Series.from_categorical(cat)
# adding category
expect = psr.cat.set_categories(["a", "b", "c", "d"])
got = sr.cat.set_categories(["a", "b", "c", "d"])
assert_eq(expect, got)
# removing category
expect = psr.cat.set_categories(["a", "b"])
got = sr.cat.set_categories(["a", "b"])
assert_eq(expect, got)
def test_categorical_set_categories_preserves_order():
series = pd.Series([1, 0, 0, 0, 2]).astype("category")
# reassigning categories should preserve element ordering
assert_eq(
series.cat.set_categories([1, 2]),
cudf.Series(series).cat.set_categories([1, 2]),
)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_as_ordered(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(False))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(False))
assert cd_sr.cat.ordered is False
assert cd_sr.cat.ordered == pd_sr.cat.ordered
pd_sr_1 = pd_sr.cat.as_ordered(inplace=inplace)
cd_sr_1 = cd_sr.cat.as_ordered(inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert cd_sr_1.cat.ordered is True
assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_as_unordered(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(True))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(True))
assert cd_sr.cat.ordered is True
assert cd_sr.cat.ordered == pd_sr.cat.ordered
pd_sr_1 = pd_sr.cat.as_unordered(inplace=inplace)
cd_sr_1 = cd_sr.cat.as_unordered(inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert cd_sr_1.cat.ordered is False
assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("from_ordered", [True, False])
@pytest.mark.parametrize("to_ordered", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_reorder_categories(
pd_str_cat, from_ordered, to_ordered, inplace
):
pd_sr = pd.Series(pd_str_cat.copy().set_ordered(from_ordered))
cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(from_ordered))
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
kwargs = dict(ordered=to_ordered, inplace=inplace)
pd_sr_1 = pd_sr.cat.reorder_categories(list("cba"), **kwargs)
cd_sr_1 = cd_sr.cat.reorder_categories(list("cba"), **kwargs)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert_eq(pd_sr_1, cd_sr_1)
assert str(cd_sr_1) == str(pd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_add_categories(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy())
cd_sr = cudf.Series(pd_str_cat.copy())
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
pd_sr_1 = pd_sr.cat.add_categories(["d"], inplace=inplace)
cd_sr_1 = cd_sr.cat.add_categories(["d"], inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert "d" in pd_sr_1.cat.categories.to_list()
assert "d" in cd_sr_1.cat.categories.to_pandas().to_list()
assert_eq(pd_sr_1, cd_sr_1)
@pytest.mark.parametrize("inplace", [True, False])
def test_categorical_remove_categories(pd_str_cat, inplace):
pd_sr = pd.Series(pd_str_cat.copy())
cd_sr = cudf.Series(pd_str_cat.copy())
assert_eq(pd_sr, cd_sr)
assert str(pd_sr) == str(cd_sr)
pd_sr_1 = pd_sr.cat.remove_categories(["a"], inplace=inplace)
cd_sr_1 = cd_sr.cat.remove_categories(["a"], inplace=inplace)
pd_sr_1 = pd_sr if pd_sr_1 is None else pd_sr_1
cd_sr_1 = cd_sr if cd_sr_1 is None else cd_sr_1
assert "a" not in pd_sr_1.cat.categories.to_list()
assert "a" not in cd_sr_1.cat.categories.to_pandas().to_list()
assert_eq(pd_sr_1, cd_sr_1)
# test using ordered operators
assert_exceptions_equal(
lfunc=cd_sr.to_pandas().cat.remove_categories,
rfunc=cd_sr.cat.remove_categories,
lfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}),
rfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}),
expected_error_message="removals must all be in old categories",
)
def test_categorical_dataframe_slice_copy():
pdf = pd.DataFrame({"g": pd.Series(["a", "b", "z"], dtype="category")})
gdf = cudf.from_pandas(pdf)
exp = pdf[1:].copy()
gdf = gdf[1:].copy()
assert_eq(exp, gdf)
@pytest.mark.parametrize(
"data",
[
pd.Series([1, 2, 3, 89]),
pd.Series([1, 2, 3, 89, 3, 1, 89], dtype="category"),
pd.Series(["1", "2", "3", "4", "5"], dtype="category"),
pd.Series(["1.0", "2.5", "3.001", "9"], dtype="category"),
pd.Series(["1", "2", "3", None, "4", "5"], dtype="category"),
pd.Series(["1.0", "2.5", "3.001", None, "9"], dtype="category"),
pd.Series(["a", "b", "c", "c", "b", "a", "b", "b"]),
pd.Series(["aa", "b", "c", "c", "bb", "bb", "a", "b", "b"]),
pd.Series([1, 2, 3, 89, None, np.nan, np.NaN], dtype="float64"),
pd.Series([1, 2, 3, 89], dtype="float64"),
pd.Series([1, 2.5, 3.001, 89], dtype="float64"),
pd.Series([None, None, None]),
pd.Series([], dtype="float64"),
],
)
@pytest.mark.parametrize(
"cat_type",
[
pd.CategoricalDtype(categories=["aa", "bb", "cc"]),
pd.CategoricalDtype(categories=[2, 4, 10, 100]),
pd.CategoricalDtype(categories=["aa", "bb", "c"]),
pd.CategoricalDtype(categories=["a", "bb", "c"]),
pd.CategoricalDtype(categories=["a", "b", "c"]),
pd.CategoricalDtype(categories=["1", "2", "3", "4"]),
pd.CategoricalDtype(categories=["1.0", "2.5", "3.001", "9"]),
pd.CategoricalDtype(categories=[]),
],
)
def test_categorical_typecast(data, cat_type):
pd_data = data.copy()
gd_data = cudf.from_pandas(data)
assert_eq(pd_data.astype(cat_type), gd_data.astype(cat_type))
@pytest.mark.parametrize(
"data",
[
pd.Series([1, 2, 3, 89]),
pd.Series(["a", "b", "c", "c", "b", "a", "b", "b"]),
pd.Series(["aa", "b", "c", "c", "bb", "bb", "a", "b", "b"]),
pd.Series([1, 2, 3, 89, None, np.nan, np.NaN], dtype="float64"),
pd.Series([1, 2, 3, 89], dtype="float64"),
pd.Series([1, 2.5, 3.001, 89], dtype="float64"),
pd.Series([None, None, None]),
pd.Series([], dtype="float64"),
],
)
@pytest.mark.parametrize(
"new_categories",
[
["aa", "bb", "cc"],
[2, 4, 10, 100],
["aa", "bb", "c"],
["a", "bb", "c"],
["a", "b", "c"],
[],
pd.Series(["a", "b", "c"]),
pd.Series(["a", "b", "c"], dtype="category"),
pd.Series([-100, 10, 11, 0, 1, 2], dtype="category"),
],
)
def test_categorical_set_categories_categoricals(data, new_categories):
pd_data = data.copy().astype("category")
gd_data = cudf.from_pandas(pd_data)
assert_eq(
pd_data.cat.set_categories(new_categories=new_categories),
gd_data.cat.set_categories(new_categories=new_categories),
)
assert_eq(
pd_data.cat.set_categories(
new_categories=pd.Series(new_categories, dtype="category")
),
gd_data.cat.set_categories(
new_categories=cudf.Series(new_categories, dtype="category")
),
)
@pytest.mark.parametrize(
"data",
[
[1, 2, 3, 4],
["a", "1", "2", "1", "a"],
pd.Series(["a", "1", "22", "1", "aa"]),
pd.Series(["a", "1", "22", "1", "aa"], dtype="category"),
pd.Series([1, 2, 3, -4], dtype="int64"),
pd.Series([1, 2, 3, 4], dtype="uint64"),
pd.Series([1, 2.3, 3, 4], dtype="float"),
[None, 1, None, 2, None],
[],
],
)
@pytest.mark.parametrize(
"dtype",
[
pd.CategoricalDtype(categories=["aa", "bb", "cc"]),
pd.CategoricalDtype(categories=[2, 4, 10, 100]),
| pd.CategoricalDtype(categories=["aa", "bb", "c"]) | pandas.CategoricalDtype |
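# A small illustration of casting with an explicit pandas.CategoricalDtype, the
# API used in the parametrization completed above: values missing from the given
# categories become NaN after the cast.
import pandas as pd

dtype = pd.CategoricalDtype(categories=["a", "b", "c"])
s = pd.Series(["a", "c", "x"]).astype(dtype)
print(s.isna().tolist())          # [False, False, True] -- "x" is not a category
print(s.cat.categories.tolist())  # ['a', 'b', 'c']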
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 1 00:49:21 2018
@author: teo
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 10:32:18 2018
@author: teo
"""
import pandas as pd
from plotly import tools
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
import plotly.graph_objs as go
import cufflinks as cf
cf.go_offline()
from plotly.graph_objs import *
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
#init_notebook_mode(connected=True)
data=pd.read_csv("C:\\Users\\teo\Downloads\\HR_comma_sep.csv",header=0)
# ------------------------------ Pre-Processing---------------------------------------------
data_viz=pd.DataFrame(data)
# Check for null columns
data_list=[data]
for dataset in data_list:
print("+++++++++++++++++++++++++++")
print(pd.isnull(dataset).sum() >0)
print("+++++++++++++++++++++++++++")
# We don't have missing values
xx=["other","sales","accounting","hr","technical","support","product_mng","marketing"]
for dataset in data_list[:]:
#Mapping departments
dep_mapping={"sales":1,"accounting":2,"hr":3,"technical":4,"support":5,"product_mng":6,"marketing":7}
dataset['sales']=dataset['sales'].map(dep_mapping)
dataset['sales']=dataset['sales'].fillna(0)# for other deparments RandD,IT
#Mapping salary
salary_mapping={'low':1,'medium':2,'high':3}
dataset['salary']=dataset['salary'].map(salary_mapping)
#Mapping monthly average hours
dataset.loc[dataset['average_montly_hours']<=100,'average_montly_hours'] =0
dataset.loc[(dataset['average_montly_hours']>100) & (dataset['average_montly_hours'] <=150),'average_montly_hours']=1
dataset.loc[(dataset['average_montly_hours']>150) & (dataset['average_montly_hours'] <=250),'average_montly_hours']=2
dataset.loc[(dataset['average_montly_hours']>250) & (dataset['average_montly_hours']<=300),'average_montly_hours'] =3
dataset.loc[dataset['average_montly_hours']>300,'average_montly_hours'] =4
#Mapping time spend company
dataset.loc[dataset['time_spend_company']<=3,'time_spend_company'] =3
dataset.loc[(dataset['time_spend_company']>3) & (dataset['time_spend_company'] <=6),'time_spend_company']=6
dataset.loc[dataset['time_spend_company']>6 ,'time_spend_company'] =9
#Mapping last evaluation
dataset.loc[dataset['last_evaluation']<=0.25,'last_evaluation'] =0
dataset.loc[(dataset['last_evaluation']>0.25) & (dataset['last_evaluation']<=0.5),'last_evaluation'] =1
dataset.loc[(dataset['last_evaluation']>0.5) & (dataset['last_evaluation'] <=0.75),'last_evaluation']=2
dataset.loc[dataset['last_evaluation']>0.75 ,'last_evaluation'] =3
#Mapping satisfaction level
dataset.loc[dataset['satisfaction_level']<=0.25,'satisfaction_level'] =0
dataset.loc[(dataset['satisfaction_level']>0.25) & (dataset['satisfaction_level'] <=0.5),'satisfaction_level']=1
dataset.loc[(dataset['satisfaction_level']>0.5) & (dataset['satisfaction_level']<=0.75),'satisfaction_level'] =2
dataset.loc[dataset['satisfaction_level']>0.75 ,'satisfaction_level'] =3
#convert list to dataframe
features = dataset.dtypes.index
dataDF = | pd.DataFrame() | pandas.DataFrame |
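# The repeated .loc-based binning above can be expressed more compactly with
# pandas.cut; this sketch reproduces the 'average_montly_hours' mapping (bins are
# right-closed, matching the <= / & chains in the loop).
import pandas as pd

hours = pd.Series([96, 120, 180, 260, 310])
binned = pd.cut(hours,
    bins=[0, 100, 150, 250, 300, float("inf")],
    labels=[0, 1, 2, 3, 4])
print(binned.tolist())   # [0, 1, 2, 3, 4]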
import os
import glob
import pandas as pd
import numpy as np
from datetime import datetime
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from preprocess import (sleep_preprocess, heart_preprocess, exercise_preprocess, stepcount_preprocess)
file = glob.glob(r'datasets\*')
file_list = []
for f in file:
if f.endswith('.csv'):
globals()['%s_df' % f.split('.')[0].split('\\')[1]] = pd.read_csv(f, index_col = 0)
file_list.append('%s_df' % f.split('.')[0].split('\\')[1])
#drop null columns
for df in file_list:
print(df)
l = len(globals()[df])
globals()[df].dropna(axis = 1, how = 'all', inplace = True)
# preprocess the datasets
sleep_df = sleep_preprocess(sleep_df)
heart_rate_df = heart_preprocess(heart_rate_df)
Exercise_df = exercise_preprocess(Exercise_df)
step_count_df = stepcount_preprocess(step_count_df)
def dailyroutine(dt):
# create index for time to join heart rate and step counts
df = pd.DataFrame()
df['time'] = | pd.date_range("00:00", "23:59", freq="min") | pandas.date_range |
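# A minimal sketch of the pandas.date_range call completed above: a minute
# frequency over one day gives a 1440-row time index that heart-rate and
# step-count records can later be aligned against.
import pandas as pd

minutes = pd.date_range("00:00", "23:59", freq="min")
print(len(minutes))   # 1440 -- one entry per minute of the day
print(minutes[0].strftime("%H:%M"), minutes[-1].strftime("%H:%M"))  # 00:00 23:59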
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 22:43:29 2020
@author: Lyy
"""
import pandas as pd
import numpy as np
import re
import random
import matplotlib.patches as patches
import matplotlib.pyplot as plt
class Node(object):
idcase = {}
def __init__(self, nid, ntype, x, y):
self.id = nid
self.type = ntype
self.x = x
self.y = y
self.link_approach = []
self.link_exit = []
Node.idcase[self.id] = self
class Link(object):
idcase = {}
def __init__(self, lid, from_nd, to_nd, vf, num_of_lanes, w=12, kj=120, qmax=1800, flow_input=200):
self.id = lid
self.from_node = from_nd
self.to_node = to_nd
self.num_of_lanes = num_of_lanes
self.vf = vf
self.w = w
self.kj = kj
self.qmax = qmax
self.flowinput = flow_input
Link.idcase[self.id] = self
class Movement(object):
idcase = {}
def __init__(self, gdir, from_link, to_link):
self.gdirect = gdir # 1 = Left, 2 = Stright, 3 = Right
self.from_link = from_link
self.to_link = to_link
self.move = str(from_link) + ',' + str(to_link)
self.ratio = None
Movement.idcase[str(from_link) + ',' + str(to_link)] = self
def getAllMovement():
df = pd.DataFrame(columns=["Movement", "gdirect", "Corr_Node", "Ratio"])
index = 0
temp_df = pd.DataFrame(columns=["Movement", "gdirect", "Corr_Node", "Ratio"])
from_link = 0
for key in Movement.idcase:
if from_link == 0 or from_link == Movement.idcase[key].from_link:
temp_df.loc[index] = [key,
Movement.idcase[key].gdirect,
Node.idcase[Link.idcase[Movement.idcase[key].from_link].to_node].id,
Movement.idcase[key].ratio]
from_link = Movement.idcase[key].from_link
else:
temp_df = temp_df.sort_values(by="gdirect")
df = df.append(temp_df)
temp_df = pd.DataFrame(columns=["Movement", "gdirect", "Corr_Node", "Ratio"])
temp_df.loc[index] = [key,
Movement.idcase[key].gdirect,
Node.idcase[Link.idcase[Movement.idcase[key].from_link].to_node].id,
Movement.idcase[key].ratio]
from_link = 0
index += 1
temp_df = temp_df.sort_values(by="gdirect")
df = df.append(temp_df)
df.to_csv("movement.csv")
def getMovementByFromLink(self):
linkid = self.from_link
tempList = []
for key in Movement.idcase:
if linkid == Movement.idcase[key].from_link:
tempList.append(Movement.idcase[key])
else:
continue
return tempList
def readNetwork():
link_file = pd.read_csv("link.csv")
node_file = pd.read_csv("node.csv")
for i in range(len(link_file)):
Link(*list(link_file.iloc[i]))
for j in range(len(node_file)):
Node(*list(node_file.iloc[j]))
for key in Node.idcase:
if Node.idcase[key].type == 0:
for subkey in Link.idcase:
if Link.idcase[subkey].from_node == Node.idcase[key].id:
Node.idcase[key].link_exit.append(Link.idcase[subkey])
elif Link.idcase[subkey].to_node == Node.idcase[key].id:
Node.idcase[key].link_approach.append(Link.idcase[subkey])
def getLength(x,y):
return np.sqrt(x**2 + y**2)
def getCosine(x1, x2, y1, y2):
return (x2*x1 + y2*y1) / getLength(x1,y1) / getLength(x2,y2)
def getCrossProduct(va, vb):
return va[0]*vb[1] - va[1]*vb[0]
def initMovement():
for key in Node.idcase:
for app_link in Node.idcase[key].link_approach:
app_vector = [Node.idcase[app_link.to_node].x - Node.idcase[app_link.from_node].x,
Node.idcase[app_link.to_node].y - Node.idcase[app_link.from_node].y]
for exit_link in Node.idcase[key].link_exit:
exit_vector = [Node.idcase[exit_link.to_node].x - Node.idcase[exit_link.from_node].x,
Node.idcase[exit_link.to_node].y - Node.idcase[exit_link.from_node].y]
cosine = getCosine(app_vector[0], exit_vector[0], app_vector[1], exit_vector[1])
if getCrossProduct(app_vector, exit_vector) > 0:
right_flag = 0
else:
right_flag = 1
if -0.707 < cosine <= 0.707 and right_flag:
Movement(3, app_link.id, exit_link.id)
elif 0.707 < cosine <= 1:
Movement(2, app_link.id, exit_link.id)
elif -0.707 < cosine <= 0.707 and not right_flag:
Movement(1, app_link.id, exit_link.id)
def getExitLinkByApproachLink(n):
tempList = []
for key in Movement.idcase:
if Movement.idcase[key].from_link == n:
tempList.append(Movement.idcase[key].to_link)
return tempList
def convertOutput():
df = pd.read_csv("output_valve.csv", index_col=0)
for key in Node.idcase:
nd = Node.idcase[key]
if nd.type == 0:
            temp_df = pd.DataFrame(columns=df.columns)
output_phase = pd.DataFrame()
for applink in nd.link_approach:
exitlink_list = getExitLinkByApproachLink(applink)
for elem in exitlink_list:
move = str(applink)+','+str(elem)
temp_df.loc[move] = df.loc[move]
count = 0
for i in range(len(temp_df.columns)):
if i < len(temp_df.columns) - 1:
b = list(temp_df[temp_df.columns[i]] == temp_df[temp_df.columns[i+1]])
if False in b:
output_phase
count = 0
else:
count += 1
def getRatio():
df = pd.read_csv('Zhu_model_flow.csv', index_col=0)
tot_vol = 0
for row in df.index:
m = re.split(r'\D', row)
move = m[1] + ',' + m[-4]
if m[1] == m[-4]:
tot_vol = np.sum(df.loc[row])
if move in Movement.idcase and tot_vol:
Movement.idcase[move].ratio = np.sum(df.loc[row]) / tot_vol
ratio = 0
for key in Movement.idcase:
        if Movement.idcase[key].ratio is None:
movements = Movement.idcase[key].getMovementByFromLink()
for i in range(len(movements)):
if not i:
movements[i].ratio = random.uniform(0, 1)
ratio += movements[i].ratio
elif i > 0 and i < len(movements) - 1:
movements[i].ratio = random.uniform(0, 1 - ratio)
ratio += movements[i].ratio
else:
movements[i].ratio = 1 - ratio
ratio = 0
def plotSignal():
movedf = pd.read_csv('movement.csv', index_col=1)
signaldf = pd.read_csv('output_signal.csv')
signaldf = signaldf.where(signaldf['gdirect'] != 3).dropna()
movement_list = []
for i in range(len(signaldf)):
movement_list.append(str(int(signaldf.iloc[i]['from'])) + ',' + str(int(signaldf.iloc[i]['to'])))
signaldf['movement'] = movement_list
signaldf = signaldf.set_index('movement')
style = "Simple,tail_width=0.5,head_width=4,head_length=8"
kw = dict(arrowstyle=style, color="k")
for nd in Node.idcase:
if Node.idcase[nd].type != 0:
continue
tempdf = movedf.where(movedf['Corr_Node'] == nd).dropna(subset=['Corr_Node']).where(movedf['gdirect'] != 3).dropna(subset=['Corr_Node'])
green_phase = {}
theta = signaldf.loc[tempdf.index]['theta'].drop_duplicates().sort_values()
count = 0
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
duration = 0
last_phase = 0
last_signal_df = | pd.DataFrame() | pandas.DataFrame |
# <NAME> (<EMAIL>)
from __future__ import absolute_import, division, print_function
from builtins import range
import numpy as np
import pandas as pd
RANDOM = "random"
ORDRED = "ordered"
LINEAR = "linear"
ORDERED = ORDRED # Alias with extra char but correct spelling
SFT_FMT = "L%d"
INDEX = None # Dummy variable to represent index of dataframe
DEFAULT_SPLIT = {INDEX: (RANDOM, 0.8)} # The ML standard for some reason
def build_lag_df(df, n_lags, stride=1, features=None):
"""Build a lad dataframe from dataframe where the rows are ordered time
indices for a time series data set. This is useful for autoregressive
models.
Parameters
----------
df : DataFrame, shape (n_samples, n_cols)
        Original dataset we want to build the lag data set from.
n_lags : int
Number of lags. ``n_lags=1`` means only the original data set. Must be
>= 1.
stride : int
Stride of the lags. For instance, ``stride=2`` means only even lags.
features : array-like, shape (n_features,)
Subset of columns in `df` to include in the lags data. All columns are
retained for lag 0. For data frames containing features and targets,
the features (inputs) can be placed in `features` so the targets
(outputs) are only present for lag 0. If None, use all columns.
Returns
-------
df : DataFrame, shape (n_samples, n_cols + (n_lags - 1) * n_features)
        New data frame where the lag data frames have been concatenated together.
The columns are a new hierarchical index with the lag at the lowest
level.
Examples
--------
>>> data=np.random.choice(10,size=(4,3))
>>> df=pd.DataFrame(data=data,columns=['a','b','c'])
>>> ds.build_lag_df(df,3,features=['a','b'])
a b c a b a b
lag L0 L0 L0 L1 L1 L2 L2
0 2 2 2 NaN NaN NaN NaN
1 2 9 4 2 2 NaN NaN
2 8 4 0 2 9 2 2
3 3 5 6 8 4 2 9
"""
df_sub = df if features is None else df[list(features)] # Take all if None
D = {(SFT_FMT % nn): df_sub.shift(stride * nn) for nn in range(1, n_lags)}
D[SFT_FMT % 0] = df
df = | pd.concat(D, axis=1, names=["lag"]) | pandas.concat |
import logging
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
logger = logging.getLogger("ACE")
class TestAskJunoACE:
def __init__(self):
self.k_fold_count = 4
self.num_epochs = 500
self.all_mae_histories = []
def fit_1(self, file_name):
names = ["reach", "impressions", "results", "amount", "frequency", "clicks", "cpc", "ctr", "cpreach", "cpm",
"engagement", "cpr"]
data = pd.read_csv(file_name, engine='c', dtype='float64', names=names, header=0, skiprows=0)
mean = data.mean(axis=0)
data -= mean
std = data.std(axis=0)
data /= std
x = data.iloc[:, 0:10]
y = data.iloc[:, -1]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
model = keras.Sequential([
layers.Dense(64, activation="relu", input_shape=(x_train.shape[1],)),
layers.Dense(64, activation="relu"),
layers.Dense(1)
])
model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"])
model.fit(x_train, y_train, epochs=130, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(x_test, y_test)
logger.info(f'mse score #{test_mse_score}, mae score #{test_mae_score}')
#https://stackoverflow.com/questions/40729162/merging-results-from-model-predict-with-original-pandas-dataframe
y_hats = model.predict(x_test)
y_test['preds'] = y_hats
df_out = | pd.merge(data, y_test[['preds']], how='left', left_index=True, right_index=True) | pandas.merge |
# -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
# Submission processing
# rule_data1. fft + ever_1
# rule_data2. rule 2 + fft + ever_1
# rule_data3. rule 4 + rule 2 + fft + ever_1
# rule_data4. rule 4
# rule_data5. rule 5
dire = '../../data/'
train = pd.read_csv(dire + 'train5.csv', encoding='utf-8')
test = pd.read_csv(dire + 'test5.csv', encoding='utf-8')
rule_data1 = pd.read_csv(dire + 'backup/LOVECT/rule_data1.csv', encoding='utf-8')
rule_data2 = pd.read_csv(dire + 'backup/LOVECT/rule_data2.csv', encoding='utf-8')
rule_data3 = pd.read_csv(dire + 'backup/LOVECT/rule_data3.csv', encoding='utf-8')
rule_data4 = pd.read_csv(dire + 'backup/LOVECT/rule_data4.csv', encoding='utf-8')
rule_data5 = pd.read_csv(dire + 'backup/LOVECT/rule_data5.csv', encoding='utf-8')
rule_data6 = pd.read_csv(dire + 'backup/LOVECT/rule_data6.csv', encoding='utf-8')
rule_data7 = pd.read_csv(dire + 'backup/LOVECT/rule_data7.csv', encoding='utf-8')
rule_data8 = | pd.read_csv(dire + 'backup/LOVECT/rule_data8.csv', encoding='utf-8') | pandas.read_csv |
import os
import pandas as pd
from random import randint
# Class responsible for: calculating where to paste detection images on
# background, pasting images on backgrounds, formatting and outputting CSV
# files
class DataGenerator:
def __init__(self, image_set, background_set, num_samples, cutoff):
self.image_set = image_set
self.background_set = background_set
self.num_samples = num_samples
self.train_csv_lines = []
self.test_csv_lines = []
self.cutoff = cutoff * background_set.number_of_backgrounds *\
num_samples
def process_images(self):
print("----- PROCESSING IMAGES -----")
sample_no = 1
train_dir = "output/train/"
test_dir = "output/test/"
if not os.path.exists(train_dir):
os.makedirs(train_dir)
if not os.path.exists(test_dir):
os.makedirs(test_dir)
for background in self.background_set.background_images:
for i in range(0, self.num_samples):
tmp_back = background.background_image.copy()
new_name = str(sample_no) + ".png"
for image in self.image_set.detection_images:
r_width = tmp_back.width - image.width
r_height = tmp_back.height - image.height
width_offset = randint(0, r_width)
height_offset = randint(0, r_height)
offset = (width_offset, height_offset)
# Place card in offset position
tmp_back.paste(image.detection_image, offset)
# x_min, y_min, x_max, y_max
x_min = offset[0]
y_min = offset[1]
x_max = x_min + image.width
y_max = y_min + image.height
class_name = image.name[0:-4]
if sample_no <= self.cutoff:
self.train_csv_lines.append(
(new_name, tmp_back.width, tmp_back.height,
class_name, x_min, y_min, x_max, y_max))
else:
self.test_csv_lines.append(
(new_name, tmp_back.width, tmp_back.height,
class_name, x_min, y_min, x_max, y_max))
# Name and save the new image (background image remains
# unchanged)
print("Processing Sample ", sample_no)
# if sample_no <= self.cutoff:
if sample_no <= self.cutoff:
tmp_back.save("output/train/" + new_name)
else:
tmp_back.save("output/test/" + new_name)
sample_no += 1
def generate_csv(self):
train_list = self.train_csv_lines
test_list = self.test_csv_lines
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin',
'xmax', 'ymax']
train_df = | pd.DataFrame(train_list, columns=column_name) | pandas.DataFrame |
import pandas as pd
typeDict = {'Types': ['yearly', 'monthly', 'daily', 'hourly']}
aDict = {'AUS_Dep_Results A': [3.1, 4.6, 7.9, 8.4]}
a1Dict = {'AUS_With_Results A': [3.1, 4.6, 7.9, 8.4]}
bDict = {'AUS_Dep_Results B': [5.4, 9.3, 1.2, 6.6]}
b1Dict = {'AUS_With_Results B': [5.4, 9.3, 1.2, 6.6]}
cDict = {'HUN_Dep_Results A': [2.1, 3.6, 4.9, 5.4]}
c1Dict = {'HUN_With_Results A': [2.1, 3.6, 4.9, 5.4]}
dDict = {'HUN_Dep_Results B': [9.4, 8.3, 7.2, 1.6]}
d1Dict = {'HUN_With_Results B': [9.4, 8.3, 7.2, 1.6]}
# python 3.5 unpack
table = {**typeDict, **aDict, **a1Dict, **bDict, **b1Dict, **cDict, **c1Dict, **dDict, **d1Dict}
indx = list(typeDict.keys())[0]
df = | pd.DataFrame(table) | pandas.DataFrame |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
| tm.assert_frame_equal(df[cols], rs_c, check_names=False) | pandas._testing.assert_frame_equal |
# create dataframes based on the available data
import json
import os
import geopandas as gpd
import pandas as pd
import rasterio
import rasterstats
import time
import random
def generate_dataframe(shapefile, raster):
# Read the shapefile and convert its crs
districts = gpd.read_file(shapefile)
s = shapefile.split("/")
e = s[6].split("_")
nuts = e[1].split(".")[0]
reg = e[0]
    # exclusive to the Europe shapefile
try:
districts["GEN"]=districts["NAME"]
districts["SHN"]=districts["NUTS"]
except:
pass
# districts.drop(["GEN"], axis=1)
# districts["GEN"]= districts["POPULATION"]
districts = districts.to_crs('epsg:4326')
# convert it into a geoJson file
districts.to_file("./data/geojsonFiles/geojson_{}_{}".format(reg, nuts), driver="GeoJSON", encoding="utf-8")
with open("./data/geojsonFiles/geojson_{}_{}".format(reg, nuts), encoding="utf-8") as geofile:
geojson_layer = json.load(geofile)
# concordance between the df and the geojson file based on an 'id' key
state_id_map = {}
for feature in geojson_layer['features']:
feature['id'] = feature['properties']['GEN']
state_id_map[feature['properties']['SHN']] = feature['id']
districts['id'] = districts['SHN'].apply(lambda x: state_id_map[x])
# import the raster file
rf = rasterio.open(raster, mode='r')
# get the stats from the raster and the shapefile
# Assign raster values to a numpy nd array
polluant = rf.read(1)
affine = rf.transform
# Calculating the zonal statistics
avg_pl = rasterstats.zonal_stats(districts,
polluant,
affine=affine,
stats=['mean', 'min', 'max', 'std'],
geojson_out=True)
# Extracting the average data from the list
avg_poll = []
i = 0
while i < len(avg_pl):
avg_poll.append(avg_pl[i]['properties'])
i = i + 1
# Transfering the infromation from the list to a pandas DataFrame
avg_pl_gr = pd.DataFrame(avg_poll)
districts["mean"] = avg_pl_gr["mean"]
districts["min"] = avg_pl_gr["min"]
districts["max"] = avg_pl_gr["max"]
districts["std"] = avg_pl_gr["std"]
districts ["nuts"] = nuts
districts ["zone"] = reg
districts ["COVID Cases"] = random.randint(10000,30000)
keep = ["SHN", "GEN", "mean", "min", "max", "std", "nuts", "zone", "id", "COVID Cases"]
districts = districts[[c for c in districts.columns if c in keep]]
return districts, geojson_layer
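# Hedged usage sketch for generate_dataframe(); the paths below are
# hypothetical placeholders that follow the <path>/<zone>/shapefiles|polluant
# layout assumed by create_csvs(), not files shipped with this project.
# The helper is illustrative only and is never called by this module.
def _demo_generate_dataframe():
    districts, geojson_layer = generate_dataframe(
        "./data/rawData/france/shapefiles/nuts2/france_nuts2.shp",
        "./data/rawData/france/polluant/no2/no2_2020_03.tif")
    return districts[["GEN", "mean", "min", "max", "std"]]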
def rem_dir(saving_path, p, n):
names = os.listdir(p)
names = pd.DataFrame(names)
names.to_excel("{}/{}.xlsx".format(saving_path, n), index=False)
return
def create_csvs(path):
zones = os.listdir(path)
rem_dir(path, path, "ancienCountries")
for zone in zones:
if ".xlsx" not in zone:
nuts1 = os.listdir(('{}/{}/shapefiles/nuts1').format(path, zone))
nuts2 = os.listdir(('{}/{}/shapefiles/nuts2').format(path, zone))
nuts3 = os.listdir(('{}/{}/shapefiles/nuts3').format(path, zone))
poll = os.listdir(('{}/{}/polluant').format(path, zone))
#
rem_dir(('{}/{}/shapefiles/nuts1').format(path, zone),
('{}/{}/shapefiles/nuts1').format(path, zone),
"{}_nuts1".format(zone))
rem_dir(('{}/{}/shapefiles/nuts2').format(path, zone),
('{}/{}/shapefiles/nuts2').format(path, zone),
"{}_nuts2".format(zone))
rem_dir(('{}/{}/shapefiles/nuts3').format(path, zone),
('{}/{}/shapefiles/nuts3').format(path, zone),
"{}_nuts3".format(zone))
rem_dir(('{}/{}/polluant').format(path, zone),
('{}/{}/polluant').format(path, zone),
"{}_polluants".format(zone))
#
for p in poll:
if ".xlsx" not in p:
ppath = os.path.join(
('{}/{}/polluant/').format(path, zone), p)
pelement = os.listdir(ppath)
rem_dir(('{}/{}/polluant/{}').format(path, zone, p),
('{}/{}/polluant/{}').format(path, zone, p),
"{}_polluants_{}".format(zone, p))
for tif in pelement:
if ".xlsx" not in tif:
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
for n in [nuts1, nuts2, nuts3]:
if n == nuts1:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts1/{}'
).format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel((
'./data/processedData/{}_{}_nuts1_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
elif n == nuts2:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts2/{}'
).format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel((
'./data/processedData/{}_{}_nuts2_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
else:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts3/{}'
).format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel((
'./data/processedData/{}_{}_nuts3_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
return
def generate_from_countries(path, zone):
'''
    This function allows the generation of CSVs for a given path and a specific region.
'''
nuts1 = os.listdir(('{}/{}/shapefiles/nuts1').format(path, zone))
nuts2 = os.listdir(('{}/{}/shapefiles/nuts2').format(path, zone))
nuts3 = os.listdir(('{}/{}/shapefiles/nuts3').format(path, zone))
poll = os.listdir(('{}/{}/polluant').format(path, zone))
#
rem_dir(('{}/{}/shapefiles/nuts1').format(path, zone),
('{}/{}/shapefiles/nuts1').format(path,
zone), "{}_nuts1".format(zone))
rem_dir(('{}/{}/shapefiles/nuts2').format(path, zone),
('{}/{}/shapefiles/nuts2').format(path,
zone), "{}_nuts2".format(zone))
rem_dir(('{}/{}/shapefiles/nuts3').format(path, zone),
('{}/{}/shapefiles/nuts3').format(path,
zone), "{}_nuts3".format(zone))
rem_dir(('{}/{}/polluant').format(path, zone),
('{}/{}/polluant').format(path, zone), "{}_polluants".format(zone))
#
for p in poll:
ppath = os.path.join(('{}/{}/polluant/').format(path, zone), p)
pelement = os.listdir(ppath)
rem_dir(('{}/{}/polluant/{}').format(path, zone, p),
('{}/{}/polluant/{}').format(path, zone, p),
"{}_polluants_{}".format(zone, p))
for tif in pelement:
if ".xlsx" not in tif:
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
for n in [nuts1, nuts2, nuts3]:
if n == nuts1:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts1/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts1_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
elif n == nuts2:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts2/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts2_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
else:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts3/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts3_{}_{}.xlsx'
).format(zone, p, year, month))
except:
pass
return
def generate_from_polluant(path, zone, polluant):
pelement = os.listdir(('{}/{}/polluant/{}').format(path, zone, polluant))
nuts1 = os.listdir(('{}/{}/shapefiles/nuts1').format(path, zone))
nuts2 = os.listdir(('{}/{}/shapefiles/nuts2').format(path, zone))
nuts3 = os.listdir(('{}/{}/shapefiles/nuts3').format(path, zone))
for tif in pelement:
if ".xlsx" not in tif:
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
for n in [nuts1, nuts2, nuts3]:
if n == nuts1:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts1/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, polluant, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = polluant
dataframe.to_excel(
('./data/processedData/{}_{}_nuts1_{}_{}.xlsx'
).format(zone, polluant, year, month))
except:
pass
elif n == nuts2:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts2/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, polluant, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = polluant
dataframe.to_excel(
('./data/processedData/{}_{}_nuts2_{}_{}.xlsx'
).format(zone, polluant, year, month))
except:
pass
else:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts3/{}').format(
path, zone,
res[0]), ('{}/{}/polluant/{}/{}').format(
path, zone, polluant, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = polluant
dataframe.to_excel(
('./data/processedData/{}_{}_nuts3_{}_{}.xlsx'
).format(zone, polluant, year, month))
except:
pass
return
def generate_from_nuts(n, path, zone, nutfile):
poll = os.listdir(('{}/{}/polluant').format(path, zone))
for p in poll:
if ".xlsx" not in p:
ppath = os.path.join(('{}/{}/polluant/').format(path, zone), p)
pelement = os.listdir(ppath)
for tif in pelement:
if ".xlsx" not in tif:
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
#try:
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/{}/{}').format(
path, zone, n, nutfile),
('{}/{}/polluant/{}/{}').format(path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_{}_{}_{}.xlsx').format(
zone, p, n, year, month))
#except:
# pass
return
def generate_from_tif(path, zone, p, tif):
nuts1 = os.listdir(('{}/{}/shapefiles/nuts1').format(path, zone))
nuts2 = os.listdir(('{}/{}/shapefiles/nuts2').format(path, zone))
nuts3 = os.listdir(('{}/{}/shapefiles/nuts3').format(path, zone))
part = tif.split("_")
year = part[1]
month = part[2]
month = month.split(".")
month = month[0]
month = str(int(month))
for n in [nuts1, nuts2, nuts3]:
if n == nuts1:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts1/{}').format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts1_{}_{}.xlsx').format(
zone, p, year, month))
except:
pass
elif n == nuts2:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts2/{}').format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts2_{}_{}.xlsx').format(
zone, p, year, month))
except:
pass
else:
try:
res = [i for i in n if '.shp' in i]
dataframe, jsonfile = generate_dataframe(
('{}/{}/shapefiles/nuts3/{}').format(path, zone, res[0]),
('{}/{}/polluant/{}/{}').format(path, zone, p, tif))
dataframe['year'] = int(year)
dataframe['month'] = int(month)
dataframe['date'] = '{}/{}'.format(month, year)
dataframe['polluant'] = p
dataframe.to_excel(
('./data/processedData/{}_{}_nuts3_{}_{}.xlsx').format(
zone, p, year, month))
except:
pass
return
def check_new_countries(path):
'''
    This function allows us to check the countries directory and generate CSVs from newly added regions.
'''
zones = os.listdir(path)
ancien = pd.read_excel("./data/rawData/ancienCountries.xlsx")
lis = ancien[0].tolist()
new = []
for zone in zones:
if ".xlsx" not in zone:
if zone not in lis:
new.append(zone)
number_new_docs = len(new)
print("there is {} new regions added".format(number_new_docs))
if len(new) != 0:
for i in new:
if ".xlsx" not in i:
try:
generate_from_countries(path, i)
except:
pass
lis = lis + new
liste = pd.DataFrame(lis)
liste.to_excel("{}/ancienCountries.xlsx".format(path),
index=False)
return
def check_new_polluants(path):
'''
    This function allows us to check the polluant directory of each area
    and generate CSVs from newly added ones.
'''
zones = os.listdir(path)
for zone in zones:
if ".xlsx" not in zone:
poll = os.listdir(('{}/{}/polluant').format(path, zone))
ancien = pd.read_excel(
"./data/rawData/{}/polluant/{}_polluants.xlsx".format(
zone, zone))
try:
lis = ancien[0].tolist()
except:
lis = []
pass
new = []
for p in poll:
if p not in lis:
if ".xlsx" not in p:
new.append(p)
number_new_poll = len(new)
print("there is {} new polluant added for the zone {}".format(
number_new_poll, zone))
if len(new) != 0:
for i in new:
if ".xlsx" not in i:
try:
generate_from_polluant(path, zone, i)
except:
pass
lis = lis + new
liste = pd.DataFrame(lis)
liste.to_excel(
"{}/{}/polluant/{}_polluants.xlsx".format(
path, zone, zone),
index=False)
return
def check_new_nuts(path):
zones = os.listdir(path)
for zone in zones:
if ".xlsx" not in zone:
n1 = os.listdir(('{}/{}/shapefiles/nuts1').format(path, zone))
an1 = pd.read_excel("{}/{}/shapefiles/nuts1/{}_nuts1.xlsx".format(
path, zone, zone))
try:
lis1 = an1[0].tolist()
except:
lis1 = []
pass
new1 = []
n2 = os.listdir(('{}/{}/shapefiles/nuts2').format(path, zone))
an2 = pd.read_excel("{}/{}/shapefiles/nuts2/{}_nuts2.xlsx".format(
path, zone, zone))
try:
lis2 = an2[0].tolist()
except:
lis2 = []
pass
new2 = []
n3 = os.listdir(('{}/{}/shapefiles/nuts3').format(path, zone))
an3 = pd.read_excel("{}/{}/shapefiles/nuts3/{}_nuts3.xlsx".format(
path, zone, zone))
try:
lis3 = an3[0].tolist()
except:
lis3 = []
pass
new3 = []
#try:
for n in n1:
if (n not in lis1) and (".xlsx" not in n):
new1.append(n)
for n in n2:
if (n not in lis2) and (".xlsx" not in n):
new2.append(n)
for n in n3:
if (n not in lis3) and (".xlsx" not in n):
new3.append(n)
number_new_nuts1 = len(new1)
number_new_nuts2 = len(new2)
number_new_nuts3 = len(new3)
print(
"there is {} new nuts1 file added for the zone {}".format(
number_new_nuts1, zone))
print(
"there is {} new nuts2 file added for the zone {}".format(
number_new_nuts2, zone))
print(
"there is {} new nuts3 file added for the zone {}".format(
number_new_nuts3, zone))
if len(new1) != 0:
for i in new1:
if ".shp" in i:
try:
generate_from_nuts("nuts1", path, zone, i)
except:
pass
lis1 = lis1 + new1
liste1 = | pd.DataFrame(lis1) | pandas.DataFrame |
from bs4 import BeautifulSoup as BS
import requests
import pandas as pd
import os
round = 1
data = []
while True:
a = requests.get(f"https://www.sololearn.com/codes?page={round}").content
html = BS(a, "html.parser")
code_boxes = html.find_all("div", class_="code")
print(round)
if len(code_boxes) < 1:
break
for i in code_boxes:
temp = dict()
dt = i.find("p", class_="codeDate").attrs["data-date"].split(" ")
date = dt[0].split("/")
temp["title"] = i.find("a", class_="nameLink").get_text()
temp["codeURL"] = i.find("a", class_="nameLink")["href"]
temp["userName"] = i.find("a", class_="userName").get_text()
temp["profileURL"] = "https://www.sololearn.com" + \
i.find("a", class_="userName")["href"]
temp["profilePicture"] = "https://www.sololearn.com" + \
i.find("a", class_="userName")["href"]
temp["date"] = date[1]+"-"+date[0]+"-"+date[2]
temp["time"] = dt[1]+" "+dt[2]
temp["codeLang"] = i.find("a", class_="icon").get_text()
vote = i.find("p", class_="positive")
url = i.find("a", class_="nameLink")["href"]
id = url.replace("https://code.sololearn.com/", "")[0:12]
temp["id"] = id
if vote:
temp["upvotes"] = int(
vote.get_text().replace("+", "")
)
else:
temp["upvotes"] = 0
data.append(temp)
round += 1
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'].fillna(2, inplace=True)
df_2012_2013['prcab'].fillna(2, inplace=True)
df_2014_2015['prcab'].fillna(2, inplace=True)
df_2016_2017['prcab'].fillna(2, inplace=True)
df_2018_2019['prcab'].fillna(2, inplace=True)
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 21 10:04:20 2022
@author: wyattpetryshen
"""
#Data Source: https://climate.weather.gc.ca/climate_data/hourly_data_e.html?hlyRange=2014-10-23%7C2022-01-20&dlyRange=2018-10-29%7C2022-01-20&mlyRange=%7C&StationID=52959&Prov=BC&urlExtension=_e.html&searchType=stnProx&optLimit=specDate&Month=6&Day=1&StartYear=1840&EndYear=2019&Year=2021&selRowPerPage=25&Line=2&txtRadius=25&optProxType=navLink&txtLatDecDeg=49.745&txtLongDecDeg=-114.8839&timeframe=1
#Sort and Plot Wind Direction and Speed
import pandas as pd
import numpy as np
import os
from windrose import WindroseAxes
import matplotlib.pyplot as plt
from datetime import datetime
import matplotlib
os.chdir('PATH') #Input Data Path (Folder containing monthly data)
files = os.listdir() #Delete DS.store file if included in list of files
y2021 = [pd.read_csv(i, sep=",", header=[0]) for i in files]
columns = ['Year','Month','Day','Date/Time (LST)','Temp (°C)','Rel Hum (%)',
'Wind Dir (10s deg)', 'Wind Spd (km/h)']
df2021 = | pd.concat(y2021,ignore_index=True) | pandas.concat |
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
def plot_mri_settings_scatter(df, path, subject):
"""
    Group the data by MRI acquisition scheme and plot it.
    Returns the data for each MRI setting as a dataframe and plots it as a
    line graph with a scatter overlay.
"""
df_base = None
df_tr1 = None
df_tr2 = None
df_t1w = None
grouped = df.groupby('acquisition_scheme')
for name, group in grouped:
print(name)
# print(group)
        if 'baseline' in name:
df_base = pd.DataFrame(group)
plt.figure()
print((0))
g = sns.FacetGrid(df_base,hue='order_volume', height=6, aspect=6,palette='BuGn')
g.map(sns.lineplot, "Frequency (Hz)", "Level (dBFS)", alpha=1, linewidth=1)
g.map(plt.scatter, "Frequency (Hz)", "Level (dBFS)", s=50, alpha=1, linewidth=1)
plt.title('Baseline')
g.add_legend()
sns.despine()
g.savefig(path+subject+'_baseline.png')
elif 'epi_fast' in name:
df_tr1 = | pd.DataFrame(group) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
def preprocess(df):
returns = df[['beat0', 'beat1', 'beat2', 'beat3']].copy()
tickers = df[['ticker']].copy()
df = df.drop(columns=['ticker', "longName", "logo_url", "close_price", 'beat0', 'beat1', 'beat2', 'beat3'])
# standardize to mean 0, sd 1
std = (df-df.mean()) / df.std()
df = | pd.concat([tickers, std, returns], axis=1) | pandas.concat |
import os
import urllib
import json
import time
import arrow
import numpy as np
import pandas as pd
from pymongo import MongoClient, UpdateOne
MONGO_URI = os.environ.get('MONGO_URI')
DARKSKY_KEY = os.environ.get('DARKSKY_KEY')
FARM_LIST = ['BLUFF1', 'CATHROCK', 'CLEMGPWF', 'HALLWF2', 'HDWF2',
'LKBONNY2', 'MTMILLAR', 'NBHWF1', 'SNOWNTH1', 'SNOWSTH1',
'STARHLWF', 'WATERLWF', 'WPWF']
FARM_NAME_LIST = ['Bluff Wind Farm', 'Cathedral Rocks Wind Farm',
'Clements Gap Wind Farm','Hallett 2 Wind Farm',
'Hornsdale Wind Farm 2', 'Lake Bonney Stage 2 Windfarm',
'Mt Millar Wind Farm', 'North Brown Hill Wind Farm',
'Snowtown Wind Farm Stage 2 North',
'Snowtown South Wind Farm', 'Starfish Hill Wind Farm',
'Waterloo Wind Farm', 'Wattle Point Wind Farm']
def connect_db(MONGO_URI):
"""Connect to MongoDB & return the client object."""
return MongoClient(MONGO_URI)
def fetch_data(client, farm, limit):
"""Get the last N row of data."""
time_start = time.time()
db = client['wpp']
print(f'Fetching data for {farm}...', end='', flush=True)
col = db[farm]
    if limit is None:
df = pd.DataFrame(col.find({}, batch_size=10000).sort('_id', -1))
else:
df = pd.DataFrame(
col.find({}, batch_size=1000).sort('_id', -1).limit(limit))
if '_id' in df.columns:
df = df.rename(columns={'_id': 'time'})
runtime = round(time.time()-time_start, 2)
print(f' Done! Fetched {len(df)} documents in {runtime} s')
return df
def update_db(farm, update_df, upsert=True):
"""Update database via bulk write."""
if 'time' in update_df.columns:
update_df = update_df.rename(columns={'time': '_id'})
client = connect_db(MONGO_URI)
db = client['wpp']
ops = []
for i in range(len(update_df)):
_id = update_df.iloc[i]._id
data = update_df.iloc[i].to_dict()
ops.append(UpdateOne({'_id': _id}, {'$set': data}, upsert=upsert))
db[farm].bulk_write(ops)
def fill_val(raw, offset):
"""Fill missing value with the mean of the -24h and +24h data.
offset is the rows for the +24h/-24h, for 1h interval is 24,
for 5min interval is 288.
"""
df = raw.copy(deep=True)
for item in df.drop('time', axis=1).columns:
for i in df[df.isna().any(1)].index:
# Take into consideration if missing values don't have -24h and +24h data
try:
v_plus = df[item][i+offset]
except:
v_plus = np.nan
try:
v_minus = df[item][i-offset]
except:
v_minus = np.nan
# fill with the with the mean of the -24h and +24h data if they both exist
# otherwise, just fill with the one that exists
if not pd.isnull(v_plus) and not pd.isnull(v_minus):
v = 0.5 * (v_plus + v_minus)
elif pd.isnull(v_plus):
v = v_minus
elif pd.isnull(v_minus):
v = v_plus
else:
v = np.nan
df.loc[i, item] = v
return df
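# Minimal illustrative sketch (a made-up demo, never called by this module):
# on hourly data fill_val() is used with offset=24, so the gap planted at row
# 30 below gets the mean of rows 6 and 54 -- the same hour one day earlier and
# one day later.
def _demo_fill_val():
    demo = pd.DataFrame({
        'time': pd.date_range('2021-01-01', periods=72, freq='H'),
        'actual': np.random.rand(72),
    })
    demo.loc[30, 'actual'] = np.nan
    return fill_val(demo, offset=24)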
def get_power(farm, local_start_dt, local_end_dt):
"""Get power data & convert it to 1h format."""
tz = 'Australia/Adelaide'
utc_start_dt = pd.to_datetime(
local_start_dt).tz_localize(tz).tz_convert('UTC')
utc_end_dt = pd.to_datetime(local_end_dt).tz_localize(tz).tz_convert('UTC')
offset = (arrow.now() - arrow.get(local_start_dt)).days + 2
power_api = f'https://services.aremi.data61.io/aemo/v6/duidcsv/{farm}?offset={offset}D'
raw = pd.read_csv(power_api)
raw.columns = ['time', 'actual']
raw['time'] = pd.to_datetime(raw['time'])
# Ensure no vacant in the time series
reference_idx = pd.date_range(start=raw.iloc[0].time,
end=raw.iloc[-1].time,
freq='5min',
name='time')
raw = raw.set_index('time').reindex(reference_idx).reset_index()
raw = fill_val(raw, offset=288)
# Slice the raw df to a desired range
power_5min = raw[(raw['time'] >= utc_start_dt)
& (raw['time'] < utc_end_dt)]
# rectify negative value
neg_idx = power_5min[power_5min.actual < 0].index
power_5min.loc[neg_idx, 'actual'] = 0
# aggregate by the hour
power_1h = power_5min.set_index('time')['actual'].resample(
'60min', offset='30min', label='left').mean().reset_index()
power_1h.time = power_1h.time.dt.strftime('%Y-%m-%d %H:%M:%S')
return power_1h
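# Hedged sketch (uncalled, synthetic data): the 5-minute-to-hourly step above
# is just a resample with a 30 min offset, so every hourly value is the mean of
# twelve 5-min readings and is labelled by the left edge of its bin.
def _demo_hourly_aggregation():
    five_min = pd.Series(
        np.arange(24, dtype=float),
        index=pd.date_range('2021-01-01 00:30', periods=24, freq='5min'),
    )
    return five_min.resample('60min', offset='30min', label='left').mean()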
def get_weather(farm, local_start_dt, local_end_dt):
"""Get weather data from Darksky.
local_start_dt and local_end_dt are strings in format of %Y-%m-%d %H:%M:%S.
Return a dataframe with hourly weather data.
"""
overview = pd.read_csv('https://services.aremi.data61.io/aemo/v6/csv/wind')
overview.set_index('DUID', inplace=True)
location = f"{overview.loc[farm,'Lat']},{overview.loc[farm,'Lon']}"
flags = '?exclude=currently,daily,flags&units=si'
# Set a datetime range, call the api for each day's data in the dt range
local_dt_range = pd.date_range(local_start_dt, local_end_dt, freq='1D')
weather = pd.DataFrame()
for dt in local_dt_range[:-1]:
# Construct the API url for each day
time = dt.strftime('%Y-%m-%d')+'T00:00:00'
dsapi = f'https://api.darksky.net/forecast/{DARKSKY_KEY}/{location},{time}{flags}'
with urllib.request.urlopen(dsapi) as url:
data = json.loads(url.read().decode())
try:
df = pd.DataFrame(data['hourly']['data'])
except:
df = pd.DataFrame()
weather = | pd.concat([weather, df], axis=0, sort=True) | pandas.concat |
"""
This file is part of the accompanying code to our paper
<NAME>., <NAME>., <NAME>., & <NAME>. (2021). Uncovering flooding mecha-
nisms across the contiguous United States through interpretive deep learning on
representative catchments. Water Resources Research, 57, e2021WR030185.
https://doi.org/10.1029/2021WR030185.
Copyright (c) 2021 <NAME>. All rights reserved.
You should have received a copy of the MIT license along with the code. If not,
see <https://opensource.org/licenses/MIT>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as mpl
import matplotlib.patches as mpatches
import matplotlib.dates as mdates
def plot_peaks(Q, peak_dates, plot_range=[None, None], linecolor="tab:brown", markercolor="tab:red", figsize=(7.5, 2.0)):
"""
Plot the identified flood peaks.
Parameters
----------
Q: pandas series of streamflow observations.
peak_dates: a sequence of flood peaks' occurrence dates.
plot_range: the date range of the plot, it can be a pair of date strings (default: [None, None]).
linecolor: the color of the line (default: 'tab:brown').
markercolor: the color of the marker (default: 'tab:red').
figsize: the width and height of the figure in inches (default: (7.5, 2.0)).
"""
fig, ax = plt.subplots(figsize=figsize)
fig.tight_layout()
plot_range[0] = Q.index[0] if plot_range[0] == None else plot_range[0]
plot_range[1] = Q.index[-1] if plot_range[1] == None else plot_range[1]
ax.plot(Q["flow"].loc[plot_range[0]:plot_range[1]], color=linecolor, lw=1.0)
ax.plot(
Q.loc[peak_dates, "flow"].loc[plot_range[0]:plot_range[1]],
"*",
c=markercolor,
markersize=8,
)
ax.set_title(f"Identified flood peaks from {plot_range[0]} to {plot_range[1]}")
ax.set_ylabel("flow(mm)")
plt.show()
def plot_eg_individual(dataset, peak_eg_dict, peak_eg_var_dict, peak_date, title_suffix=None, linewidth=1.5, figsize=(10, 3)):
eg_plot = dataset.loc[pd.date_range(end=peak_date, periods=list(peak_eg_dict.values())[0].shape[1]+1, freq='d')[:-1]]
eg_plot.loc[:, "prcp_eg"] = abs(peak_eg_dict[pd.to_datetime(peak_date)][0, :, 0])
eg_plot.loc[:, "temp_eg"] = abs(peak_eg_dict[pd.to_datetime(peak_date)][0, :, 1])
eg_plot.loc[:, "prcp_eg_val"] = abs(peak_eg_var_dict[pd.to_datetime(peak_date)][0, :, 0])
eg_plot.loc[:, "temp_eg_val"] = abs(peak_eg_var_dict[pd.to_datetime(peak_date)][0, :, 1])
fig = plt.figure(constrained_layout=False, figsize=figsize)
gs1 = fig.add_gridspec(nrows=2, ncols=1, hspace=0, left=0.00, right=0.45, height_ratios=[2.5, 1.5])
ax1 = fig.add_subplot(gs1[0, 0])
ax2 = fig.add_subplot(gs1[1, 0])
gs2 = fig.add_gridspec(nrows=2, ncols=1, hspace=0, left=0.55, right=1.00, height_ratios=[2.5, 1.5])
ax3 = fig.add_subplot(gs2[0, 0])
ax4 = fig.add_subplot(gs2[1, 0])
for ax in [ax1, ax3]:
ax.spines["bottom"].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
for ax in [ax2, ax4]:
ax.set_ylabel(r'$\phi^{EG}_{i}$')
ax.spines["top"].set_visible(False)
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
ax.set_ylim(bottom=np.min(peak_eg_dict[pd.to_datetime(peak_date)]),
top=np.max(peak_eg_dict[pd.to_datetime(peak_date)]))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%Y'))
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax1.plot(eg_plot['prcp'], color='k', lw=linewidth)
ax1.set_ylabel('P [mm]', ha='center', y=0.5)
ax2.plot(eg_plot['prcp_eg'], color='blue', lw=linewidth)
ax2.fill_between(eg_plot['prcp_eg'].index,
eg_plot['prcp_eg']-eg_plot.loc[:, "prcp_eg_val"],
eg_plot['prcp_eg']+eg_plot.loc[:, "prcp_eg_val"], color='blue', alpha=0.3)
ax2.yaxis.label.set_color('blue')
ax2.tick_params(axis='y', colors='blue')
ax3.plot(eg_plot['tmean'], color='k', lw=linewidth)
ax3.set_ylabel('T [\u2103]', ha='center', y=0.5)
ax4.plot(eg_plot['temp_eg'], color='red', lw=linewidth)
ax4.fill_between(eg_plot['temp_eg'].index,
eg_plot['temp_eg']-eg_plot.loc[:, "temp_eg_val"],
eg_plot['temp_eg']+eg_plot.loc[:, "temp_eg_val"], color='red', alpha=0.3)
ax4.yaxis.label.set_color('red')
ax4.tick_params(axis='y', colors='red')
ax1.set_title(f"Flood on {pd.to_datetime(peak_date).strftime('%d %B %Y')} {str(title_suffix)}",
fontweight='bold', loc='left')
plt.show()
def plot_arrow(a1, p1, a2, p2, coordsA='axes fraction', coordsB='axes fraction'):
con = mpatches.ConnectionPatch(xyA=p1, xyB=p2, coordsA=coordsA, coordsB=coordsB,
axesA=a1, axesB=a2, arrowstyle="-|>", facecolor='black')
a1.add_artist(con)
def plot_simple_arrow(a1, p1, a2, p2, coordsA='axes fraction', coordsB='axes fraction'):
con = mpatches.ConnectionPatch(xyA=p1, xyB=p2, coordsA=coordsA, coordsB=coordsB,
axesA=a1, axesB=a2, arrowstyle="->", facecolor='black')
a1.add_artist(con)
def plot_line(a1, p1, a2, p2, coordsA='axes fraction', coordsB='axes fraction'):
con = mpatches.ConnectionPatch(xyA=p1, xyB=p2, coordsA=coordsA, coordsB=coordsB,
axesA=a1, axesB=a2)
a1.add_artist(con)
def plot_decomp(dataset, decomp_dict, peak_date, title_suffix=None, linewidth=1.0, figsize=(10, 5)):
blue_colors = mpl.cm.Blues(np.linspace(0,1,16))
green_colors = mpl.cm.Greens(np.linspace(0,1,16))
red_colors = mpl.cm.Reds(np.linspace(0,1,16))
purple_colors = mpl.cm.Purples(np.linspace(0,1,16))
winter_colors = mpl.cm.winter(np.linspace(0,1,16))
autumn_colors = mpl.cm.autumn(np.linspace(0,1,16))
decomp_plot = dataset.loc[pd.date_range(end=peak_date, periods=list(decomp_dict.values())[0]['x'].shape[0]+1, freq='d')]
fig = plt.figure(constrained_layout=False, figsize=figsize)
gs1 = fig.add_gridspec(nrows=2, ncols=1, hspace=1.2, left=0.000, right=0.180, top=0.70, bottom=0.30)
gs2 = fig.add_gridspec(nrows=6, ncols=1, hspace=0.6, left=0.250, right=0.550)
gs3 = fig.add_gridspec(nrows=3, ncols=1, hspace=0.6, left=0.650, right=1.000, top=0.80, bottom=0.20)
ax1_1 = fig.add_subplot(gs1[0, 0])
ax1_2 = fig.add_subplot(gs1[1, 0])
ax2_1 = fig.add_subplot(gs2[0, 0])
ax2_2 = fig.add_subplot(gs2[1, 0])
ax2_3 = fig.add_subplot(gs2[2, 0])
ax2_4 = fig.add_subplot(gs2[3, 0])
ax2_5 = fig.add_subplot(gs2[4, 0])
ax2_6 = fig.add_subplot(gs2[5, 0])
ax3_1 = fig.add_subplot(gs3[0, 0])
ax3_2 = fig.add_subplot(gs3[1, 0])
ax3_3 = fig.add_subplot(gs3[2, 0])
ax1_1.plot(decomp_plot['prcp'].iloc[:-1], color='k', lw=linewidth)
ax1_2.plot(decomp_plot['tmean'].iloc[:-1], color='k', lw=linewidth)
for i in range(16):
ax2_1.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['hi_arr'][:, i],
c=green_colors[i], alpha=0.60, lw=linewidth)
ax2_2.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['hc_arr'][:, i],
c=blue_colors[i], alpha=0.60, lw=linewidth)
ax2_3.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['hf_arr'][:, i],
c=red_colors[i], alpha=0.60, lw=linewidth)
ax2_4.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['ho_arr'][:, i],
c=purple_colors[i], alpha=0.60, lw=linewidth)
ax2_5.plot(decomp_plot.index[:], decomp_dict[pd.to_datetime(peak_date)]['c_states'][:, i],
c=autumn_colors[i], alpha=0.60, lw=linewidth)
ax2_6.plot(decomp_plot.index[:], decomp_dict[pd.to_datetime(peak_date)]['h_states'][:, i],
c=winter_colors[i], alpha=0.60, lw=linewidth)
ax3_1.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['h_update'][:, i],
c='#000', alpha=0.60, lw=linewidth*0.6)
ax3_2.plot(decomp_plot.index[:-1], decomp_dict[pd.to_datetime(peak_date)]['h_forget'][:, i],
c='#000', alpha=0.60, lw=linewidth*0.6)
ax3_3.bar(decomp_plot.index[:-1],
np.matmul(decomp_dict[pd.to_datetime(peak_date)]['h_forget'][:] * decomp_dict[pd.to_datetime(peak_date)]['h_update'][:],
decomp_dict[pd.to_datetime(peak_date)]['dense_W'])[:, 0],
edgecolor='k',
width=np.timedelta64(1, 'D'),
color='red',
linewidth=0.6)
ax1_1.set_xticklabels([])
ax2_1.set_xticklabels([])
ax2_2.set_xticklabels([])
ax2_3.set_xticklabels([])
ax2_4.set_xticklabels([])
ax2_5.set_xticklabels([])
ax3_1.set_xticklabels([])
ax3_2.set_xticklabels([])
ax1_1.set_title('Precipitation [mm]', loc='left', pad=0)
ax1_2.set_title('Temperature [\u2103]',loc='left', pad=0)
ax2_1.set_title(r'Input gate $i_t$', loc='left', pad=0)
ax2_2.set_title(r'Candidate vector $\tilde{c}_t$', loc='left', pad=0)
ax2_3.set_title(r'Forget gate $f_t$', loc='left', pad=0)
ax2_4.set_title(r'Output gate $o_t$', loc='left', pad=0)
ax2_5.set_title(r'Cell state $c_t$', loc='left', pad=0)
ax2_6.set_title(r'Hidden state $h_t$', loc='left', pad=0)
ax3_1.set_title(r'Information initially gained', loc='left', pad=0)
ax3_2.set_title(r'Proportion to be retained', loc='left', pad=0)
ax3_3.set_title(r'Information actually contributed', loc='left', pad=0)
ax1_2.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax1_2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
for tick in ax1_2.xaxis.get_ticklabels()[1:3] + ax1_2.xaxis.get_ticklabels()[4:6]:
tick.set_visible(False)
ax2_6.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax2_6.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
for tick in ax2_6.xaxis.get_ticklabels()[1::2]:
tick.set_visible(False)
ax3_3.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax3_3.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
for tick in ax3_3.xaxis.get_ticklabels()[1::2]:
tick.set_visible(False)
ax3_2.set_ylim(bottom=-0.1*np.percentile(decomp_dict[ | pd.to_datetime(peak_date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return | DataFrame(ad) | pandas.DataFrame |
#!/usr/bin/env python3
"""Make silly table showing distribution over block sizes for extended blocks
world run."""
import os
import re
import click
import pandas as pd
NAME_RE = re.compile(
# success-blocks-nblk35-seed2107726020-seq42
r'\b(?P<succ_fail>success|failure)-blocks-nblk(?P<nblk>\d+)(-ntow'
r'(?P<ntow>\d+))?-seed\d+-seq\d+\b')
@click.command()
@click.option(
'--directory',
default='./',
help='directory with all the blocks results in them (e.g folders of the '
'form `success-blcoks-nblk50-ntow4-seed…`)')
def main(directory):
file_names = os.listdir(directory)
result_dicts = []
for file_name in file_names:
match = NAME_RE.match(file_name)
if not match:
print("Skipping '%s'" % file_name)
continue
groups = match.groupdict()
success = groups['succ_fail'] == 'success'
if groups['ntow'] is None:
ntow = 'R'
else:
ntow = int(groups['ntow'])
data_dict = {
'successes': success,
'failures': not success,
'instances': 1,
'blocks': int(groups['nblk']),
'towers': ntow,
}
result_dicts.append(data_dict)
result_frame = | pd.DataFrame.from_records(result_dicts) | pandas.DataFrame.from_records |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Loads the episode lengths from the csv files into a dictionary and return the dictionary
def load_data(algpath, name='episodes'):
Data = []
dirFiles = os.listdir(algpath)
# Files = np.array([i for i in dirFiles if 'episodes' in i])
Files = np.array([i for i in dirFiles if name in i])
for fileIndex in range(len(Files)):
if name == "episodes":
List = pd.read_csv(algpath+'/'+Files[fileIndex])
Data.append(List['episode lengths'])
elif name == "rewards":
List = pd.read_csv(algpath+'/'+Files[fileIndex])
Data.append(List['rewards'])
# return np.array(Data) if len(Data) !=1 else np.array([Data])
return np.array(Data)
def convert_data_ep(Data):
convertedData = []
for run in range(len(Data)):
episodeLengthsData = Data[run].to_numpy()
failureTimesteps = np.cumsum(episodeLengthsData)
totalTimesteps = failureTimesteps[-1]
# Not a failure on the last episode on the last timestep
if episodeLengthsData[-1] != 0.0:
failureTimesteps = failureTimesteps[:-1]
failureTimesteps_DataFrame = | pd.DataFrame({'failures': failureTimesteps}) | pandas.DataFrame |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import warnings
def _validate_axis(data, axis):
ndim = data.ndim
if not -ndim <= axis < ndim:
raise IndexError('axis %r out of bounds [-%r, %r)'
% (axis, ndim, ndim))
if axis < 0:
axis += ndim
return axis
def _select_along_axis(values, idx, axis):
other_ind = np.ix_(*[np.arange(s) for s in idx.shape])
sl = other_ind[:axis] + (idx,) + other_ind[axis:]
return values[sl]
def nanfirst(values, axis):
axis = _validate_axis(values, axis)
idx_first = np.argmax(~pd.isnull(values), axis=axis)
return _select_along_axis(values, idx_first, axis)
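# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Shows nanfirst picking the first non-NaN value along an axis; assumes np and pd
# imported at the top of this file.
if __name__ == "__main__":
    _vals = np.array([[np.nan, 1.0, 2.0],
                      [3.0, np.nan, 4.0]])
    # Row 0's first non-NaN is 1.0, row 1's is 3.0.
    print(nanfirst(_vals, axis=1))  # -> [1. 3.]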
def Cosiner(params : dict):
def Column_correction(table):
drop_col = [i for i in table.columns if "Unnamed" in i]
table.drop(drop_col, axis = 1, inplace = True)
return table
def Samplewise_export(neg_csv_file, pos_csv_file, out_path, merged_edge_table, merged_node_table) :
print("Exporting sample-wise tables...")
neg_csv = pd.read_csv(neg_csv_file, index_col ="row ID")
pos_csv = pd.read_csv(pos_csv_file, index_col ="row ID")
neg_csv = Column_correction(neg_csv)
pos_csv = Column_correction(pos_csv)
neg_csv.columns = neg_csv.columns.str.replace(".mzXML Peak area", "", regex = False).str.replace('NEG_', '', regex = False)
pos_csv.columns = pos_csv.columns.str.replace(".mzXML Peak area", "", regex = False).str.replace('POS_', '', regex = False)
neg_csv.drop(["row m/z", "row retention time"], axis = 1, inplace = True)
pos_csv.drop(["row m/z", "row retention time"], axis = 1, inplace = True)
samples = list(set(list(neg_csv.columns) + list(pos_csv.columns)))
samples.sort()
nodes_neg = node_table[node_table['ion_mode'] == "NEG"]
nodes_neg = nodes_neg['feature_id'][nodes_neg['status_universal'] != "neutral"].astype(int).tolist()
neg_csv = neg_csv.loc[nodes_neg]
nodes_pos = node_table[node_table['ion_mode'] == "POS"]
nodes_pos = nodes_pos['feature_id'][nodes_pos['status_universal'] != "neutral"].astype(int).tolist()
pos_csv = pos_csv.loc[nodes_pos]
for sample in tqdm(samples):
#sample = samples[0]
ion_ids_neg = neg_csv.index[neg_csv[sample] > 0.0]
ion_ids_pos = pos_csv.index[pos_csv[sample] > 0.0]
#convert feature_ids to the new indexes
tmp_table = merged_node_table[merged_node_table['status'] != "neg_neutral"]
tmp_table = tmp_table[tmp_table['status'] != "pos_neutral"]
tmp_table = tmp_table[tmp_table['status'] != "mix_neutral"]
tmp_table_pos = tmp_table[tmp_table['ion_mode'] == "POS"]
tmp_table_neg = tmp_table[tmp_table['ion_mode'] == "NEG"]
ion_idx_neg = pd.Series(tmp_table_neg.index, index = tmp_table_neg['feature_id'])
ion_idx_neg = list(ion_idx_neg[ion_ids_neg])
ion_idx_pos = pd.Series(tmp_table_pos.index, index = tmp_table_pos['feature_id'])
ion_idx_pos = list(ion_idx_pos[ion_ids_pos])
ion_idx_mix = ion_idx_neg + ion_idx_pos
# Get sample neutrals
neutral_edges = merged_edge_table.loc[merged_edge_table["Adnotation"].dropna().index]
kept_edges = [i for i in neutral_edges.index if neutral_edges.loc[i, "node_2"] in ion_idx_mix]
# Get ion edges
ion_edges = merged_edge_table[merged_edge_table['status'] != "neg_add_edge"]
ion_edges = ion_edges[ion_edges['status'] != "pos_add_edge"]
for i in ion_edges.index:
if ion_edges.loc[i, "node_1"] in ion_idx_mix:
if ion_edges.loc[i, "node_2"] in ion_idx_mix:
kept_edges.append(i)
kept_edges.sort()
sample_edges = merged_edge_table.loc[kept_edges]
sample_edges.sort_values('node_1', inplace = True)
sample_edges.reset_index(inplace = True, drop = True)
kept_nodes = list(set(list(sample_edges['node_1']) + list(sample_edges['node_2'])))
kept_nodes.sort()
sample_nodes = merged_node_table.loc[kept_nodes].copy()
sample_nodes.drop(pd.Series(samples) + ".mzXML Peak area", axis = 1, inplace = True)
sample_nodes[sample] = merged_node_table[sample + ".mzXML Peak area"]
sample_nodes.to_csv(out_path + "MIX_" + sample + "_nodes.csv", index_label = "Index")
sample_edges.to_csv(out_path + "MIX_" + sample + "_edges.csv", index_label = "Index")
return
def Samplewise_export_single(csv_file, out_path, merged_edge_table, merged_node_table) :
print("Exporting sample-wise tables...")
csv = pd.read_csv(csv_file, index_col ="row ID")
csv = Column_correction(csv)
csv.columns = csv.columns.str.replace(".mzXML Peak area", "", regex = False).str.replace('NEG_', '', regex = False).str.replace('POS_', '', regex = False)
csv.drop(["row m/z", "row retention time"], axis = 1, inplace = True)
samples = list(csv.columns)
samples.sort()
nodes = merged_node_table['feature_id'][merged_node_table['status_universal'] != "neutral"].astype(int).tolist()
csv = csv.loc[nodes]
for sample in tqdm(samples):
#sample = samples[0]
ion_ids = csv.index[csv[sample] > 0.0]
#convert feature_ids to the new indexes
tmp_table = merged_node_table[merged_node_table['status_universal'] != "neutral"]
ion_idx= pd.Series(tmp_table.index, index = tmp_table['feature_id'])
ion_idx = list(ion_idx[ion_ids])
# Get sample neutrals
neutral_edges = merged_edge_table.loc[merged_edge_table["Adnotation"].dropna().index]
kept_edges = [i for i in neutral_edges.index if neutral_edges.loc[i, "node_2"] in ion_idx]
# Get ion edges
ion_edges = merged_edge_table[merged_edge_table['status'] != "add_edge"]
ion_edges = ion_edges[ion_edges['status'] != "add_edge"]
for i in ion_edges.index:
if ion_edges.loc[i, "node_1"] in ion_idx:
if ion_edges.loc[i, "node_2"] in ion_idx:
kept_edges.append(i)
kept_edges.sort()
sample_edges = merged_edge_table.loc[kept_edges]
sample_edges.sort_values('node_1', inplace = True)
sample_edges.reset_index(inplace = True, drop = True)
kept_nodes = list(set(list(sample_edges['node_1']) + list(sample_edges['node_2'])))
kept_nodes.sort()
sample_nodes = merged_node_table.loc[kept_nodes].copy()
sample_nodes.drop(pd.Series(samples) + ".mzXML Peak area", axis = 1, inplace = True)
sample_nodes[sample] = merged_node_table[sample + ".mzXML Peak area"]
sample_nodes.to_csv(out_path + "MIX_" + sample + "_nodes.csv", index_label = "Index")
sample_edges.to_csv(out_path + "MIX_" + sample + "_edges.csv", index_label = "Index")
return
def Spectrum_purge(ion_mode, mgf_file) :
singletons = node_table.index[node_table['status'] == ion_mode.lower() + "_singleton"].tolist()
empty_nodes = list()
print(f"Purging empty {ion_mode} singletons...")
for i in tqdm(singletons):
mgf_idx = int(node_table.loc[i, "mgf_index"])
if len(mgf_file[mgf_idx].peaks.mz) == 0 :
node_table.drop
empty_nodes.append(i)
del_edges = [edge_table.index[edge_table['node_1'] == n][0] for n in empty_nodes]
node_table.drop(empty_nodes, inplace = True)
edge_table.drop(del_edges, inplace = True)
return
import os
import pandas as pd
import sys
from matchms.importing import load_from_mgf
from matchms.filtering import default_filters
from matchms.similarity import ModifiedCosine
from tqdm import tqdm
def Spectrum_processing(s):
s = default_filters(s)
return s
# Load parameters
single_mode= params['single_mode']
mzmine_path_neg= params['neg_out_0']
mzmine_path_pos= params['pos_out_0']
neg_csv_file= params['neg_csv']
pos_csv_file= params['pos_csv']
neg_mgf_file= params['neg_mgf']
pos_mgf_file= params['pos_mgf']
in_path= params['mix_out_5_1']
out_path_full= params['mix_out_6_1']
out_path_samples= params['mix_out_6_2']
purge_empty_spectra = params['c_purge_empty_spectra']
mass_error= params['c_mass_error']
cosine_threshold= params['c_lowcos_threshold']
cosiner_threshold= params['c_hardcos_threshold']
matched_peaks= params['c_matched_peaks']
modified_cosine = ModifiedCosine(tolerance=mass_error)
# Load files
node_table = pd.read_csv(in_path + "MIX_nodes.csv", index_col = "Index")
edge_table = pd.read_csv(in_path + "MIX_edges.csv", index_col = "Index")
if single_mode == "BOTH":
neg_mgf = list(load_from_mgf(mzmine_path_neg + neg_mgf_file))
pos_mgf = list(load_from_mgf(mzmine_path_pos + pos_mgf_file))
neg_mgf = [Spectrum_processing(s) for s in neg_mgf]
pos_mgf = [Spectrum_processing(s) for s in pos_mgf]
elif single_mode == "POS":
pos_mgf = list(load_from_mgf(mzmine_path_pos + pos_mgf_file))
pos_mgf = [Spectrum_processing(s) for s in pos_mgf]
node_table['status'] = "pos_" + node_table['status']
edge_table['ion_mode'] = ["POS"]*len(edge_table)
elif single_mode == "NEG":
neg_mgf = list(load_from_mgf(mzmine_path_neg + neg_mgf_file))
neg_mgf = [Spectrum_processing(s) for s in neg_mgf]
node_table['status'] = "neg_" + node_table['status']
edge_table['ion_mode'] = ["NEG"]*len(edge_table)
else:
raise Exception('single_mode parameter in the params file badly set, please use either "POS", "NEG" or "BOTH".')
# Make a Series with MGF indexes as data and feature IDs as indexes
if single_mode == "BOTH":
neg_mgf_data = pd.Series(dtype = int)
for i in range(len(neg_mgf)):
neg_mgf_data.loc[int(neg_mgf[i].get("feature_id"))] = i
pos_mgf_data = pd.Series(dtype = int)
for i in range(len(pos_mgf)):
pos_mgf_data.loc[int(pos_mgf[i].get("feature_id"))] = i
elif single_mode == "POS":
pos_mgf_data = pd.Series(dtype = int)
for i in range(len(pos_mgf)):
pos_mgf_data.loc[int(pos_mgf[i].get("feature_id"))] = i
elif single_mode == "NEG":
neg_mgf_data = pd.Series(dtype = int)
for i in range(len(neg_mgf)):
neg_mgf_data.loc[int(neg_mgf[i].get("feature_id"))] = i
# If single mode, produce molecular clusters as these were not produced
# without the Mode Merger step
if single_mode != "BOTH":
node_pool = list(node_table.index)
singletons = list(edge_table["node_1"][edge_table['status'] == "self_edge"])
node_pool = list(set(node_pool) - set(singletons))
cluster_list = []
cluster_size_list = []
total_nodes = len(node_pool)
while len(node_pool) > 0:
new_cluster = [node_pool[0]]
cluster_size = 0
perc = round((1-(len(node_pool)/total_nodes))*100,1)
sys.stdout.write("\rDefining new clusters : {0}%".format(perc))
sys.stdout.flush()
while cluster_size != len(new_cluster):
cluster_size = len(new_cluster)
tmp_idx = []
for i in new_cluster:
tmp_idx += list(edge_table.index[edge_table['node_1'] == i])
tmp_idx += list(edge_table.index[edge_table['node_2'] == i])
new_cluster += list(edge_table.loc[tmp_idx, 'node_1'])
new_cluster += list(edge_table.loc[tmp_idx, 'node_2'])
new_cluster = list(set(new_cluster))
new_cluster.sort()
node_pool = list(set(node_pool) - set(new_cluster))
cluster_size_list.append(len(new_cluster))
cluster_list.append('|'.join(list(map(str, new_cluster))))
cluster_table= pd.DataFrame()
cluster_table['cluster'] = cluster_list
cluster_table['cluster_size'] = cluster_size_list
cluster_table.sort_values('cluster_size', ascending = False, inplace = True)
cluster_table.reset_index(drop = True, inplace = True)
# Identify molecular clusters
cluster_molecular = list()
for i in cluster_table.index:
node_list = cluster_table.loc[i, "cluster"].split('|')
node_list = list(map(int, node_list))
tmp_table_1 = node_table.loc[node_list,:]
if sum(tmp_table_1['status'] == "neutral") > 0 :
cluster_molecular.append(True)
else:
cluster_molecular.append(False)
cluster_table["molecular_cluster"] = cluster_molecular
node_table['cluster_id'] = [-1]*len(node_table)
print('Assigning new cluster indexes...')
for i in tqdm(cluster_table.index):
node_list = list(map(int, cluster_table.loc[i, 'cluster'].split('|')))
for j in node_list :
node_table.loc[j, 'cluster_id'] = i
# List the molecular clusters (clusters with at least one neutral node)
cluster_list = []
cluster_list_neg = []
cluster_list_pos = []
print('Finding molecular clusters...')
for i in tqdm(node_table['cluster_id'].unique()):
if sum(node_table['status_universal'][node_table['cluster_id'] == i] == "neutral") > 0:
cluster_list.append(i)
if "NEG" in node_table['ion_mode'][node_table['cluster_id'] == i].unique():
cluster_list_neg.append(i)
if "POS" in node_table['ion_mode'][node_table['cluster_id'] == i].unique():
cluster_list_pos.append(i)
cluster_list.sort()
cluster_list_neg.sort()
cluster_list_pos.sort()
# Cluster singletons and precursor ions in non-molecular clusters to ions in molecular clusters
unclustered_ions = [i for i in node_table.index if node_table.loc[i, "cluster_id"] not in cluster_list]
remains_ions_neg = list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'neg_singleton'])
remains_ions_neg += list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'neg_precursor'])
remains_ions_pos = list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'pos_singleton'])
remains_ions_pos += list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'pos_precursor'])
neg_node_table = node_table[node_table['ion_mode'] == "NEG"]
cluster_ion_list = pd.Series(index = cluster_list_neg, dtype = int)
for i in cluster_ion_list.index:
tmp_rows = neg_node_table.index[neg_node_table['cluster_id'] == i]
cluster_ion_list[i] = '|'.join(neg_node_table.loc[tmp_rows, 'mgf_index'].dropna().astype(int).astype(str))
cluster_ion_list.sort_index(inplace = True)
full_node_1 = []
full_node_2 = []
full_cluster_ids = []
full_cosine = []
full_matches = []
for i in tqdm(remains_ions_neg):
i_id = int(node_table.loc[i, "feature_id"])
i_spectrum = neg_mgf[neg_mgf_data[i_id]]
tmp_cluster_list = []
tmp_id_list = []
tmp_cosine_list = []
tmp_prod_list = []
tmp_match_list = []
for j in cluster_ion_list.index:
ion_list = list(map(int, cluster_ion_list[j].split('|')))
cos_list = list()
match_list = list()
id_list = list()
prod_list = list()
for k in ion_list:
score, n_matches = modified_cosine.pair(i_spectrum, neg_mgf[k])
id_list.append(int(neg_mgf[k].get("feature_id")))
cos_list.append(score)
match_list.append(n_matches)
prod_list.append(score * n_matches)
tmp_prod_list.append(max(prod_list))
tmp_cosine_list.append(cos_list[prod_list.index(max(prod_list))])
tmp_match_list.append(match_list[prod_list.index(max(prod_list))])
tmp_id_list.append(id_list[prod_list.index(max(prod_list))])
tmp_cluster_list.append(j)
tmp_table = pd.DataFrame(list(zip(tmp_id_list, tmp_cluster_list, tmp_cosine_list, tmp_prod_list, tmp_match_list)),
columns = ['ion_feature_id', 'cluster_id', 'cosine', 'prod', 'matches'])
tmp_table = tmp_table.loc[tmp_table['prod'].idxmax()]
counter_feature_id = tmp_table['ion_feature_id']
counter_idx = neg_node_table.index[neg_node_table['feature_id'] == counter_feature_id][0]
full_node_1.append(i)
full_node_2.append(counter_idx)
full_cluster_ids.append(tmp_table['cluster_id'])
full_cosine.append(round(tmp_table['cosine'], 2))
full_matches.append(tmp_table['matches'])
pos_node_table = node_table[node_table['ion_mode'] == "POS"]
cluster_ion_list = pd.Series(index = cluster_list_pos, dtype = int)
for i in cluster_ion_list.index:
tmp_rows = pos_node_table.index[pos_node_table['cluster_id'] == i]
cluster_ion_list[i] = '|'.join(pos_node_table.loc[tmp_rows, 'mgf_index'].dropna().astype(int).astype(str))
cluster_ion_list.sort_index(inplace = True)
for i in tqdm(remains_ions_pos):
i_id = int(node_table.loc[i, "feature_id"])
i_spectrum = pos_mgf[pos_mgf_data[i_id]]
tmp_cluster_list = []
tmp_id_list = []
tmp_cosine_list = []
tmp_prod_list = []
tmp_match_list = []
for j in cluster_ion_list.index:
ion_list = list(map(int, cluster_ion_list[j].split('|')))
cos_list = list()
match_list = list()
id_list = list()
prod_list = list()
for k in ion_list:
score, n_matches = modified_cosine.pair(i_spectrum, pos_mgf[k])
id_list.append(int(pos_mgf[k].get("feature_id")))
cos_list.append(score)
match_list.append(n_matches)
prod_list.append(score * n_matches)
tmp_prod_list.append(max(prod_list))
tmp_cosine_list.append(cos_list[prod_list.index(max(prod_list))])
tmp_match_list.append(match_list[prod_list.index(max(prod_list))])
tmp_id_list.append(id_list[prod_list.index(max(prod_list))])
tmp_cluster_list.append(j)
tmp_table = pd.DataFrame(list(zip(tmp_id_list, tmp_cluster_list, tmp_cosine_list, tmp_prod_list, tmp_match_list)),
columns = ['ion_feature_id', 'cluster_id', 'cosine', 'prod', 'matches'])
tmp_table = tmp_table.loc[tmp_table['prod'].idxmax()]
counter_feature_id = tmp_table['ion_feature_id']
counter_idx = pos_node_table.index[pos_node_table['feature_id'] == counter_feature_id][0]
full_node_1.append(i)
full_node_2.append(counter_idx)
full_cluster_ids.append(tmp_table['cluster_id'])
full_cosine.append(round(tmp_table['cosine'], 2))
full_matches.append(tmp_table['matches'])
cosine_table = pd.DataFrame()
cosine_table['node_1'] = full_node_1
cosine_table['node_2'] = full_node_2
cosine_table['cluster_id'] = full_cluster_ids
cosine_table['cosine'] = full_cosine
cosine_table['matches'] = full_matches
cosine_table = cosine_table[cosine_table['cosine'] >= cosiner_threshold]
cosine_table = cosine_table[cosine_table['matches'] >= matched_peaks]
edge_table['cosine_score'] = [0.0]*len(edge_table)
for i in tqdm(cosine_table.index):
cosined_ion = cosine_table.loc[i, "node_1"]
linked_ion = cosine_table.loc[i, "node_2"]
old_edge = edge_table.index[edge_table['node_1'] == cosined_ion][0]
if (edge_table.loc[old_edge, "status"] == "neg_self_edge") or (edge_table.loc[old_edge, "status"] == "pos_self_edge"):
edge_table.drop(old_edge, inplace = True)
node_table.loc[cosined_ion, "status"] = node_table.loc[cosined_ion, "status"][:4] + "cossingleton"
node_table.loc[cosined_ion, "status_universal"] = "cossingleton"
node_table.loc[cosined_ion, "cluster_id"] = cosine_table.loc[i, "cluster_id"]
rt_gap = round(abs(node_table.loc[cosined_ion, "rt"] - node_table.loc[linked_ion, "rt"]),3)
mz_gap = round(abs(node_table.loc[cosined_ion, "mz"] - node_table.loc[linked_ion, "mz"]),4)
cosine_score = cosine_table.loc[i, "cosine"]
ion_mode = node_table.loc[cosined_ion, "ion_mode"]
n_matches = cosine_table.loc[i, "matches"]
new_edge = max(edge_table.index) + 1
if single_mode == "BOTH":
edge_table.loc[new_edge] = [linked_ion, cosined_ion, n_matches, 0, 0, rt_gap, mz_gap,
ion_mode.lower() + "_singcos_edge", "singcos_edge", None, None, cosine_score, ion_mode, cosine_score]
else:
edge_table.loc[new_edge] = [linked_ion, cosined_ion, n_matches, 0, 0, rt_gap, mz_gap,
"singcos_edge", None, None, cosine_score, single_mode, cosine_score]
edge_table.reset_index(drop = True, inplace = True)
neutral_idx = list(node_table.index[node_table['status'].str.contains('neutral')])
for neutral in tqdm(neutral_idx):
tmp_edge_table = edge_table[edge_table['node_1'] == neutral]
pos_ions = list(tmp_edge_table['node_2'][tmp_edge_table['ion_mode'] == "POS"])
neg_ions = list(tmp_edge_table['node_2'][tmp_edge_table['ion_mode'] == "NEG"])
tmp_edges = []
while len(pos_ions) > 1 :
ion_1 = pos_ions[0]
ion_1_mgf_idx = int(node_table.loc[ion_1, "mgf_index"])
ion_1_spectrum = pos_mgf[ion_1_mgf_idx]
ion_1_rt = node_table.loc[ion_1, "rt"]
ion_1_mz = node_table.loc[ion_1, "mz"]
pos_ions.remove(ion_1)
for ion_2 in pos_ions:
ion_2_mgf_idx = int(node_table.loc[ion_2, "mgf_index"])
ion_2_spectrum = pos_mgf[ion_2_mgf_idx]
ion_2_rt = node_table.loc[ion_2, "rt"]
ion_2_mz = node_table.loc[ion_2, "mz"]
rt_gap = round(ion_1_rt - ion_2_rt, 3)
mz_gap = round(ion_1_mz - ion_2_mz, 4)
score, n_matches = modified_cosine.pair(ion_1_spectrum, ion_2_spectrum)
tmp_edges.append((ion_1, ion_2, round(score, 2), rt_gap, mz_gap, "pos"))
while len(neg_ions) > 1 :
ion_1 = neg_ions[0]
ion_1_mgf_idx = int(node_table.loc[ion_1, "mgf_index"])
ion_1_spectrum = neg_mgf[ion_1_mgf_idx]
ion_1_rt = node_table.loc[ion_1, "rt"]
ion_1_mz = node_table.loc[ion_1, "mz"]
neg_ions.remove(ion_1)
for ion_2 in neg_ions:
ion_2_mgf_idx = int(node_table.loc[ion_2, "mgf_index"])
ion_2_spectrum = neg_mgf[ion_2_mgf_idx]
ion_2_rt = node_table.loc[ion_2, "rt"]
ion_2_mz = node_table.loc[ion_2, "mz"]
rt_gap = round(ion_1_rt - ion_2_rt, 3)
mz_gap = round(ion_1_mz - ion_2_mz, 4)
score, n_matches = modified_cosine.pair(ion_1_spectrum, ion_2_spectrum)
tmp_edges.append((ion_1, ion_2, round(score, 2), rt_gap, mz_gap, "neg"))
for edge in tmp_edges:
node_1 = edge[0]
node_2 = edge[1]
cos = edge[2]
if cos < cosine_threshold : continue
rt_gap = edge[3]
mz_gap = edge[4]
tmp_ion_mode = edge[5]
new_idx = max(edge_table.index) + 1
if single_mode == "BOTH":
edge_table.loc[new_idx] = [node_1, node_2, 0, 0, 0, rt_gap, mz_gap,
tmp_ion_mode + "_cos_edge", "cos_edge", None, None, cos, tmp_ion_mode.upper(),
cos]
else:
edge_table.loc[new_idx] = [node_1, node_2, 0, 0, 0, rt_gap, mz_gap,
"cos_edge", None, None, cos, single_mode, cos]
# Produce cosine clusters between singletons and non-molecular clustered precursors/fragments
non_molecular_clusters = list(set(node_table['cluster_id'].unique()) - set(cluster_list))
non_molecular_clusters.sort()
unclustered_ions = [i for i in node_table.index if node_table.loc[i, "cluster_id"] in non_molecular_clusters]
if single_mode == "BOTH":
remains_ions_neg = list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'neg_singleton'])
remains_ions_neg += list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'neg_precursor'])
remains_ions_neg_mgf = [node_table.loc[i, "mgf_index"] for i in remains_ions_neg]
remains_ions_neg_mgf = list(map(int, remains_ions_neg_mgf))
remains_ions_pos = list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'pos_singleton'])
remains_ions_pos += list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'pos_precursor'])
remains_ions_pos_mgf = [node_table.loc[i, "mgf_index"] for i in remains_ions_pos]
remains_ions_pos_mgf = list(map(int, remains_ions_pos_mgf))
elif single_mode == "POS":
remains_ions_pos = list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'pos_singleton'])
remains_ions_pos += list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'pos_precursor'])
remains_ions_pos_mgf = [node_table.loc[i, "mgf_index"] for i in remains_ions_pos]
remains_ions_pos_mgf = list(map(int, remains_ions_pos_mgf))
elif single_mode == "NEG":
remains_ions_neg = list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'neg_singleton'])
remains_ions_neg += list(node_table.loc[unclustered_ions].index[node_table.loc[unclustered_ions]['status'] == 'neg_precursor'])
remains_ions_neg_mgf = [node_table.loc[i, "mgf_index"] for i in remains_ions_neg]
remains_ions_neg_mgf = list(map(int, remains_ions_neg_mgf))
# Process NEG data:
singleton_clusters = list()
total_nodes = len(remains_ions_neg)
while len(remains_ions_neg) > 0 :
perc = round((1-(len(remains_ions_neg)/total_nodes))*100,1)
sys.stdout.write("\rClustering remaining singletons (NEG) : {0}%".format(perc))
sys.stdout.flush()
ion_i = remains_ions_neg[0]
ion_i_mgf = remains_ions_neg_mgf[0]
remains_ions_neg.remove(ion_i)
remains_ions_neg_mgf.remove(ion_i_mgf)
for ion_j in remains_ions_neg:
ion_j_mgf = remains_ions_neg_mgf[remains_ions_neg.index(ion_j)]
score, n_matches = modified_cosine.pair(neg_mgf[ion_i_mgf], neg_mgf[ion_j_mgf])
singleton_clusters.append((ion_i, ion_j, score, n_matches, "NEG"))
# Process POS data:
total_nodes = len(remains_ions_pos)
while len(remains_ions_pos) > 0 :
perc = round((1-(len(remains_ions_pos)/total_nodes))*100,1)
sys.stdout.write("\rClustering remaining singletons (POS) : {0}%".format(perc))
sys.stdout.flush()
ion_i = remains_ions_pos[0]
ion_i_mgf = remains_ions_pos_mgf[0]
remains_ions_pos.remove(ion_i)
remains_ions_pos_mgf.remove(ion_i_mgf)
for ion_j in remains_ions_pos:
ion_j_mgf = remains_ions_pos_mgf[remains_ions_pos.index(ion_j)]
score, n_matches = modified_cosine.pair(pos_mgf[ion_i_mgf], pos_mgf[ion_j_mgf])
singleton_clusters.append((ion_i, ion_j, score, n_matches, "POS"))
singleton_clusters = pd.DataFrame(singleton_clusters, columns = ['node_1', 'node_2', 'cos', 'matches', 'ion_mode'])
singleton_clusters = singleton_clusters[singleton_clusters['cos'] >= cosiner_threshold]
singleton_clusters = singleton_clusters[singleton_clusters['matches'] >= matched_peaks]
# Define the new clusters
node_pool = singleton_clusters['node_1'].tolist() + singleton_clusters['node_2'].tolist()
node_pool = list(set(node_pool))
node_pool.sort()
cluster_list = []
cluster_size_list = []
total_nodes = len(node_pool)
while len(node_pool) > 0:
new_cluster = [node_pool[0]]
cluster_size = 0
perc = round((1-(len(node_pool)/total_nodes))*100,1)
sys.stdout.write("\rDefining cosine singleton clusters : {0}%".format(perc))
sys.stdout.flush()
while cluster_size != len(new_cluster):
cluster_size = len(new_cluster)
tmp_idx = []
for i in new_cluster:
tmp_idx += list(singleton_clusters.index[singleton_clusters['node_1'] == i])
tmp_idx += list(singleton_clusters.index[singleton_clusters['node_2'] == i])
new_cluster += list(singleton_clusters.loc[tmp_idx, 'node_1'])
new_cluster += list(singleton_clusters.loc[tmp_idx, 'node_2'])
new_cluster = list(set(new_cluster))
new_cluster.sort()
node_pool = list(set(node_pool) - set(new_cluster))
cluster_size_list.append(len(new_cluster))
cluster_list.append('|'.join(list(map(str, new_cluster))))
cluster_table = pd.DataFrame()
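# --- Hedged sketch (added for illustration; not part of the original script) ---
# The parameter dictionary Cosiner() expects, reconstructed from the keys read
# near the top of the function. All paths, thresholds and the ion mode below are
# placeholder examples, not values from the original configuration.
example_params = {
    'single_mode': 'BOTH',                 # 'POS', 'NEG' or 'BOTH'
    'neg_out_0': './mzmine/neg/', 'pos_out_0': './mzmine/pos/',
    'neg_csv': 'neg_quant.csv', 'pos_csv': 'pos_quant.csv',
    'neg_mgf': 'neg_spectra.mgf', 'pos_mgf': 'pos_spectra.mgf',
    'mix_out_5_1': './mix/step5/',
    'mix_out_6_1': './mix/step6_full/', 'mix_out_6_2': './mix/step6_samples/',
    'c_purge_empty_spectra': True,
    'c_mass_error': 0.002,                 # tolerance passed to ModifiedCosine
    'c_lowcos_threshold': 0.2, 'c_hardcos_threshold': 0.7,
    'c_matched_peaks': 4,
}
# Cosiner(example_params)  # would run the merging/cosine-clustering workflow above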
from datetime import datetime
from random import randint
from time import sleep
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
class BooksScraper:
"""Automated data collection tool (web-scraper) that is specifically
tailored to scrape data on Bookdepository based on specific category
keyword.
"""
def __init__(self, number_of_samples: int, category: str, export_to_csv: bool = False) -> None:
"""
Initialization
:param number_of_samples: The number of samples of data to be scraped.
:param category: book category to be scraped.
:param header: web browser information used to construct scraper headers.
:param baseurl: Url used to collect book categories for scraping.
:param bool export_to_csv: Should the scraped data be exported to csv?
"""
self.number_of_samples = number_of_samples
self.category = category
self._baseurl = "https://www.bookdepository.com/search?searchTerm="
self._header = {"User-Agent": "Mozilla/5.0"}
self._export_to_csv = export_to_csv
@property
def number_of_pages(self) -> int:
"""
Calculates number of pages that will be scraped
based on number of samples user wants to get.
By default, each page has 30 samples.
:param : None
:return: number of pages that will be scraped.
"""
try:
if self.number_of_samples < 30:
raise ValueError(
"Number of samples must be equal to or larger than 30.")
return int(round(self.number_of_samples / 30))
except TypeError:
raise TypeError(
"Number of samples must be of integer type.")from None
def get_page_response(self, base_url: str, category: str, number_of_pages: int, header: dict) -> BeautifulSoup:
"""
Retrieves and parses pages from the Book Depository server.
:param base_url: search base url.
:param category: book category to be scraped. Constructed during Initialization.
:param number_of_pages: The number of pages of data to be scraped.
:param header: identification needed for scraping. Constructed during Initialization.
:return: parsed BeautifulSoup object of the fetched page; prints an error message if the request fails.
"""
for page in range(1, number_of_pages+1):
url = base_url + category + "&page=" + str(page)
page = requests.get(url, headers=header)
soup = BeautifulSoup(page.content, "html.parser")
if not page.ok:
print(f"There is a {page} error. Please check your URL.")
else:
return soup
@staticmethod
def get_book_authors(soup, number_of_pages) -> list:
"""
Function which gathers book authors from the provided
BeautifulSoup object.
:param soup: BeautifulSoup object containing book info.
:param number_of_pages: The number of pages of data to be scraped.
:return: list of book authors; np.nan if extraction fails.
"""
authors = []
try:
for page in range(1, number_of_pages+1):
authors.extend([author.text for author in soup.find_all(
"span", attrs={"itemprop": "name"})])
return authors
except ValueError:
return np.nan
@staticmethod
def get_book_titles(soup, number_of_pages) -> list:
"""
Function which gathers book titles from the provided
BeautifulSoup object.
:param soup: BeautifulSoup object containing book info.
:param number_of_pages: The number of pages of data to be scraped.
:return: list of book titles; np.nan if extraction fails.
"""
titles = []
try:
for page in range(1, number_of_pages+1):
titles.extend(
[title.text for title in soup.find_all("h3", class_="title")])
return titles
except ValueError:
return np.nan
@staticmethod
def get_book_prices(soup, number_of_pages) -> list:
"""
Function which gathers book prices from the provided
BeautifulSoup object.
:param soup: BeautifulSoup object containing book info.
:param number_of_pages: The number of pages of data to be scraped.
:return: list of book prices; np.nan if extraction fails.
"""
prices = []
try:
for page in range(1, number_of_pages+1):
prices.extend(
[price.text for price in soup.find_all("p", class_="price")])
return prices
except ValueError:
return np.nan
@staticmethod
def get_book_editions(soup, number_of_pages) -> list:
"""
Function which gathers book editions from the provided
BeautifulSoup object.
:param soup: BeautifulSoup object containing book info.
:param number_of_pages: The number of pages of data to be scraped.
:return: list of book editions; np.nan if extraction fails.
"""
editions = []
try:
for page in range(1, number_of_pages+1):
editions.extend(
[edition.text for edition in soup.find_all("p", class_="format")])
return editions
except ValueError:
return np.nan
@staticmethod
def get_book_dates(soup, number_of_pages) -> list:
"""
Function which gathers book publication dates from the provided
BeautifulSoup object.
:param soup: BeautifulSoup object containing book info.
:param number_of_pages: The number of pages of data to be scraped.
:return: list of book publication dates; np.nan if extraction fails.
"""
publish_dates = []
try:
for page in range(1, number_of_pages+1):
publish_dates.extend(
[date.text for date in soup.find_all("p", class_="published")])
return publish_dates
except ValueError:
return np.nan
@staticmethod
def get_book_item_urls(soup, number_of_pages) -> list:
"""
Function which gathers books item url from the provided
BeautifulSoup object.
:param soup: BeautifulSoup object containing book info.
:param number_of_pages: The number of pages of data to be scraped.
:return: list of book item urls; np.nan if extraction fails.
"""
item_urls = []
item_url_prefix = "https://www.bookdepository.com"
try:
for page in range(1, number_of_pages+1):
item_urls.extend([item_url_prefix+url.a['href']
for url in soup.find_all("div", attrs={"class": "book-item"})])
return item_urls
except ValueError:
return np.nan
@staticmethod
def get_book_image_urls(soup, number_of_pages) -> list:
"""
Function which gathers books image url from the provided
BeautifulSoup object.
:param soup: BeautifulSoup object containing book info.
:param number_of_pages: The number of pages of data to be scraped.
:return: list of book image urls; np.nan if extraction fails.
"""
image_urls = []
try:
for page in range(1, number_of_pages+1):
image_urls.extend([image['data-lazy']
for image in soup.find_all("img", class_="lazy")])
sleep(randint(1, 3))
return image_urls
except ValueError:
return np.nan
def collect_information(self) -> pd.DataFrame:
"""
Function which combines all functions required for scraping.
:param : None
:return: pandas DataFrame with the scraped book details.
"""
print(
f">>>>Now collecting information on {self.category} books for {self.number_of_pages} page(s)<<<<")
print(">>>>Please be patient this might take a while:)<<<<")
soup = self.get_page_response(
self._baseurl, self.category, self.number_of_pages, self._header)
title = self.get_book_titles(soup, self.number_of_pages)
author = self.get_book_authors(soup, self.number_of_pages)
price = self.get_book_prices(soup, self.number_of_pages)
edition = self.get_book_editions(soup, self.number_of_pages)
publish_date = self.get_book_dates(soup, self.number_of_pages)
item_url = self.get_book_item_urls(soup, self.number_of_pages)
image_url = self.get_book_image_urls(soup, self.number_of_pages)
nested_book_details = [title, author, price,
edition, publish_date, item_url, image_url]
columns = ["title", "author", "price", "edition",
"publish_date", "item_url", "image_url"]
# each inner list in nested_book_details holds one column of values
book_details = pd.DataFrame(nested_book_details, index=columns).T
return book_details
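# --- Hedged usage sketch (added for illustration; not part of the original class) ---
# Shows how the scraper is presumably meant to be driven; the category string and
# sample size below are arbitrary examples, not values from the source.
if __name__ == "__main__":
    scraper = BooksScraper(number_of_samples=30, category="science")
    books = scraper.collect_information()
    print(books.head())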
from airflow.decorators import dag, task
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.providers.postgres.operators.postgres import PostgresOperator
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import Variable
from datetime import datetime, timedelta
from acona_postgres_tools import acona_truncate_table, acona_data_write
# [END import_module]
# [START default_args]
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
'owner': 'airflow'
}
# [END default_args]
# [START instantiate_dag]
@dag(
default_args=default_args,
start_date=days_ago(2),
tags=['prophet'],
schedule_interval='0 5 * * 0')
def acona_forecast():
# [END instantiate_dag]
# [START forecast]
@task()
def forecast(metric):
"""
#### Get historic data from Warehouse to generate forecasts
"""
import json
import requests
import os
import urllib.parse
import pandas as pd
import numpy as np
from prophet import Prophet
WAREHOUSE_TOKEN = Variable.get("WAREHOUSE_TOKEN")
WAREHOUSE_URL = Variable.get("WAREHOUSE_URL")
output = {}
df = {}
result = {}
# Load urls (for specific domain only?)
urls = os.popen('curl ' + WAREHOUSE_URL + '/rpc/acona_urls -H "Authorization: Bearer ' + WAREHOUSE_TOKEN + '"').read()
forecasts = {}
forecasted_lower = pd.DataFrame()
import pandas as pd
import networkx as nx
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
#funtions
def degree(G,f):
"""
Adds a column to the dataframe f with the degree of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
degree_dic = nx.degree_centrality(G)
degree_df = pd.DataFrame(data = {'name': list(degree_dic.keys()), 'degree': list(degree_dic.values()) })
f = pd.merge(f, degree_df, on='name')
return f
def centrality(G,f):
"""
Adds a column to the dataframe f with the centrality of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
centrality_dic = nx.degree_centrality(G)
centrality_df = pd.DataFrame(data = {'name': list(centrality_dic.keys()), 'centrality': list(centrality_dic.values()) })
f = pd.merge(f, centrality_df, on='name')
return f
def betweenness(G,f):
"""
Adds a column to the dataframe f with the betweenness of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
betweenness_dic = nx.betweenness_centrality(G)
betweenness_df = pd.DataFrame(data = {'name': list(betweenness_dic.keys()), 'betweenness': list(betweenness_dic.values()) })
f = pd.merge(f, betweenness_df, on='name')
return f
def pagerank(G,f):
"""
Adds a column to the dataframe f with the pagerank of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
pagerank_dic = nx.pagerank(G)
pagerank_df = pd.DataFrame(data = {'name': list(pagerank_dic.keys()), 'pagerank': list(pagerank_dic.values()) })
f = pd.merge(f, pagerank_df, on='name')
return f
def clustering(G,f):
"""
Adds a column to the dataframe f with the clustering coeficient of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
clustering_dic = nx.clustering(G)
clustering_df = pd.DataFrame(data = {'name': list(clustering_dic.keys()), 'clustering': list(clustering_dic.values()) })
f = pd.merge(f, clustering_df, on='name')
return f
def communities_greedy_modularity(G,f):
"""
Adds a column to the dataframe f with the community of each node.
The communities are detected using greedy modularity.
G: a networkx graph.
f: a pandas dataframe.
It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_dic = nx.algorithms.community.greedy_modularity_communities(G)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f
def communities_label_propagation(G,f):
"""
Adds a column to the dataframe f with the community of each node.
The communities are detected using label propagation.
G: a networkx graph.
f: a pandas dataframe.
It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_gen = nx.algorithms.community.label_propagation_communities(G)
communities_dic = [community for community in communities_gen]
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_label_propagation': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f
def mean_neighbors(G,f,column,n=1):
"""
Adds a column to the dataframe f with the mean value of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the mean is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
mean_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
mean_neighbors[i] = f[neighbors.tolist()[0]][column].mean()
f["mean_neighbors"] = mean_neighbors
return f
def std_neighbors(G,f,column,n=1):
"""
Adds a column to the dataframe f with the standard deviation of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the standard deviation is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
std_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
std_neighbors[i] = f[neighbors.tolist()[0]][column].std()
f["std_neighbors"] = std_neighbors
return f
def max_neighbors(G,f,column,n=1):
"""
Adds a column to the dataframe f with the maximum value of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the maximum is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
max_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
max_neighbors[i] = f[neighbors.tolist()[0]][column].max()
f["max_neighbors"] = max_neighbors
return f
def min_neighbors(G,f,column,n=1):
"""
Adds a column to the dataframe f with the minimum value of its neighbors' feature.
G: a networkx graph.
f: a pandas dataframe.
column: the column to which the minimum is applied.
n: neighbourhood order.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
min_neighbors = np.zeros([f.shape[0]])
matrix = nx.to_numpy_matrix(G)
for e in range(1,n):
matrix += matrix ** e
for i in f.index:
neighbors = matrix[i]>0
min_neighbors[i] = f[neighbors.tolist()[0]][column].min()
f["min_neighbors"] = min_neighbors
return f
def within_module_degree(G,f, column_communities = None, community_method = "label_propagation"):
"""
the within_module_degree calculates: Zi = (ki-ks)/Ss
Ki = number of links between the node i and all the nodes of its cluster
Ks = mean degree of the nodes in cluster s
Ss = the standard deviation of the degrees of the nodes in cluster s
The within-module degree z-score measures how well-connected node i is to other nodes in the module.
PAPER: <NAME>., & <NAME>. (2005). Functional cartography of complex metabolic networks. nature, 433(7028), 895.
G: a networkx graph.
f: a pandas dataframe.
column_communities: a column of the dataframe with the community of each node. If None, the communities will be estimated using community_method.
community_method: method used to compute the communities in the graph G if they are not provided via column_communities.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
if column_communities == None:
if community_method == "label_propagation":
f = communities_label_propagation(G,f)
column_communities = "communities_label_propagation"
elif community_method == "greedy_modularity":
f = communities_greedy_modularity(G,f)
column_communities = "communities_greedy_modularity"
else:
raise ValueError('A clustering method should be provided.')
z_df = pd.DataFrame(data = {'name': [], 'within_module_degree': [] })
for comutnity in set(f[column_communities]):
G2 = G.subgraph(f[f[column_communities] == comutnity]["name"].values)
Ks = 2*len(G2.edges) / len(G2.nodes)
Ss = np.std([i[1] for i in G2.degree()])
z_df = pd.concat([z_df,pd.DataFrame(data = {'name': list(G2.nodes), 'within_module_degree': [(i[1]-Ks)/Ss for i in G2.degree()] }) ])
f = pd.merge(f, z_df, on='name')
return f
def participation_coefficient(G,f, column_communities = None, community_method = "label_propagation"):
"""
the participation_coefficient calculates: Pi = 1- sum_s( (Kis/Kit)^2 )
Kis = number of links between the node i and the nodes of the cluster s
Kit = degree of the node i
The participation coefficient of a node is therefore close to 1 if its links are uniformly distributed among all the modules and 0 if all its links are within its own module.
PAPER: <NAME>., & <NAME>. (2005). Functional cartography of complex metabolic networks. nature, 433(7028), 895.
G: a networkx graph.
f: a pandas dataframe.
column_communities: a column of the dataframe with the community of each node. If None, the communities will be estimated using community_method.
community_method: method used to compute the communities in the graph G if they are not provided via column_communities.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
if column_communities == None:
if community_method == "label_propagation":
f = communities_label_propagation(G,f)
column_communities = "communities_label_propagation"
elif community_method == "greedy_modularity":
f = communities_greedy_modularity(G,f)
column_communities = "communities_greedy_modularity"
else:
raise ValueError('A clustering method should be provided.')
p_df = pd.DataFrame(data = {'name': f['name'], 'participation_coefficient': [1 for _ in f['name']] })
for node in f['name']:
Kit = len(G.edges(node))
for community in set(f[column_communities]):
Kis = len([edge for edge in G.edges(node) if edge[1] in f[f[column_communities] == community]["name"].values])
p_df.loc[ p_df["name"] == node, 'participation_coefficient' ] -= ( Kis / Kit ) ** 2
f = | pd.merge(f, p_df, on='name') | pandas.merge |
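# Worked example for participation_coefficient (comment only): a node with
# degree Kit = 4 whose links are split 2/2 between two communities gets
# Pi = 1 - ((2/4)**2 + (2/4)**2) = 0.5, while a node with all 4 links inside
# its own community gets Pi = 1 - (4/4)**2 = 0.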
#-*- coding: utf-8 -*-
"""
revision process for:
"tertiaryElectricityConsumption_1092915978"
"tertiaryElectricityConsumption_7104124143"
"gasConsumption_1092915978"
"gasConsumption_5052736858"
"gasConsumption_3230658933"
"gasConsumption_7104124143"
"gasConsumption_8801761586"
"""
import calendar
from datetime import datetime, timedelta
import happybase
import json
import sys
import pandas as pd
import pyhs2
from hive_functions import create_hive_table_from_hbase_table
from pymongo import MongoClient
def calculate_frequency(dataset):
if len(dataset.index) > 1:
return (pd.Series(dataset.index[1:]) - pd.Series(dataset.index[:-1])).value_counts().index[0]
else:
return None
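# Example (sketch, comment only): for a DataFrame indexed by a regular
# 10-minute DatetimeIndex, calculate_frequency returns the modal step,
# i.e. Timedelta('0 days 00:10:00'); with fewer than two rows it returns None.
# df = pd.DataFrame({"v": range(6)},
#                   index=pd.date_range("2017-01-01", periods=6, freq="10T"))
# calculate_frequency(df)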
def datetime_to_timestamp(ts):
# Input data is always in UTC and the timestamp stored in HBase must be in UTC timezone.
try:
return calendar.timegm(ts.to_pydatetime().utctimetuple())
except:
return None
config = json.load(open("module_edinet/config.json"))
hbase = happybase.Connection(config['hbase']['host'], int(config['hbase']['port']), timeout=90000)
hive = pyhs2.connect(host=config['hive']['host'],
port=int(config['hive']['port']),
authMechanism='PLAIN', user=config['hive']['username'], password="")
def export_table(table_name):
old_keys = [["ts","bigint"], ["deviceId","string"]]
columns = [["value", "float", "m:v"]]
cur = hive.cursor()
old_table_hive = create_hive_table_from_hbase_table(cur, table_name, table_name, old_keys, columns, "migration000001")
cur.execute("select * from {}".format(old_table_hive))
data = []
while cur.hasMoreRows:
try:
ret_val = cur.fetchone()
if not ret_val:
print("returned None")
continue
key, value = ret_val
key = json.loads(key)
data.append({"device": key['deviceid'], "ts": datetime.utcfromtimestamp(float(key['ts'])), "value": value})
except Exception as e:
print("key: {}".format(key))
print("value: {}".format(value))
print ("******************************************************")
df = pd.DataFrame(data)
sentence = "DROP TABLE {}".format(old_table_hive)
cur.execute(sentence)
df.to_csv("exported-{}.csv".format(table_name))
return df
def export_table_2(table_name):
old_keys = [["ts_ini","bigint"], ["ts_end","bigint"], ["deviceId","string"]]
columns = [["value", "float", "m:v"]]
cur = hive.cursor()
old_table_hive = create_hive_table_from_hbase_table(cur, table_name, table_name, old_keys, columns, "migration000001")
cur.execute("select * from {}".format(old_table_hive))
data = []
while cur.hasMoreRows:
try:
ret_val = cur.fetchone()
if not ret_val:
print("returned None")
continue
key, value = ret_val
key = json.loads(key)
if key['ts_ini']:
data.append({"device": key['deviceid'], "ts_end": datetime.utcfromtimestamp(float(key['ts_end'])), "ts_ini": datetime.utcfromtimestamp(float(key['ts_ini'])), "value": value})
else:
data.append({"device": key['deviceid'], "ts_end": datetime.utcfromtimestamp(float(key['ts_end'])), "ts_ini": None, "value": value})
except Exception as e:
print("key: {}".format(key))
print("value: {}".format(value))
print ("******************************************************")
df = pd.DataFrame(data)
sentence = "DROP TABLE {}".format(old_table_hive)
cur.execute(sentence)
df.to_csv("exported-{}.csv".format(table_name))
def read_analysis(file):
df = pd.read_csv(file)
df = df.set_index("ts")
df.index = pd.to_datetime(df.index)
for _, x in df.groupby("device"):
print(calculate_frequency(x))
print(x[["value"]])
def read_analysis_2(file):
df = pd.read_csv(file)
df = df.set_index("ts_ini")
df.index = pd.to_datetime(df.index)
df.ts_end = pd.to_datetime(df.ts_end)
for _, x in df.groupby("device"):
print(x.ts_end-x.index)
#print(x[["ts_end","value"]])
def clean_error(df, table_name):
df = df.set_index("ts")
df.index = pd.to_datetime(df.index)
#new_table_name = "edinet_billing_{}".format(table_name)
# try:
# hbase.create_table(new_table_name, {'m': dict()})
# except:
# pass
# hbase_table = hbase.table(new_table_name)
for device, x in df.groupby("device"):
dfx = x[x.index <= datetime.now()]
#dfx = dfx[dfx.index.day>20]
dfx['ts_end'] = dfx.index
dfx['ts_ini'] = dfx.ts_end.shift(+1)+timedelta(days=1)
print(dfx)
# batch = hbase_table.batch()
# for _, v in dfx.iterrows():
# key = "{}~{}~{}".format(datetime_to_timestamp(v['ts_ini']),
# datetime_to_timestamp(v['ts_end']),
# device)
# row = {"m:v": str(v['value'])}
# batch.put(key, row)
# batch.send()
import matplotlib.pyplot as plt
import pandas as pd
from pymongo import MongoClient
import plotly.offline as py
import plotly.graph_objs as go
from plotly import tools
mongo_old= MongoClient("192.168.127.12", 27017)
mongo_old = mongo_old["edinet_rest_service"]
mongo_old.authenticate("cimne-edinet","3nm1C--3d1n3t")
mongo_new = MongoClient("192.168.3.11", 27017)
mongo_new = mongo_new['edinet']
mongo_new.authenticate("bgruser", "g<PASSWORD>")
def review_devices(mongo_old, mongo_new, ini = None, end = None):
energy_type_map={"tertiaryElectricityConsumption":"electricityConsumption",
"monthlyElectricityConsumption": "electricityConsumption",
"monthlyGasConsumption": "gasConsumption"}
data_old = mongo_old['raw_data'].find({})
devices = data_old.distinct('deviceId')
data_final = {}
if ini and end:
devices = devices[ini:end]
elif ini:
devices = devices[ini:]
elif end:
devices = devices[:end]
total = len(devices)
for number_done, deviceId in enumerate(devices):
print("{}/{}".format(number_done,total))
try:
data_old = mongo_old['raw_data'].find({"deviceId": deviceId})
data_new = mongo_new['raw_data'].find({"device": deviceId})
data_map = {}
for i, x in enumerate(data_old):
df = pd.DataFrame({"ts": x['timestamps'], "value": x['values']})
df.index = pd.to_datetime(df["ts"])
df.value = | pd.to_numeric(df.value) | pandas.to_numeric |
import pandas as pd
df4 = pd.read_csv('../data/readme_train.csv', sep=';')
df5 = | pd.read_csv('../data/abstracts.csv', sep=';') | pandas.read_csv |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import skbio
import numpy as np
from biom.table import Table
import pandas as pd
import pandas.testing as pdt
from q2_feature_table import merge, merge_seqs, merge_taxa
from q2_feature_table._merge import _merge_feature_data, _get_overlapping
class MergeTableTests(unittest.TestCase):
def test_single_table(self):
t = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t])
self.assertEqual(t, obs)
def test_valid_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
obs = merge([t1, t2])
exp = Table(np.array([[0, 1, 3, 0, 2, 6], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_valid_non_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S4', 'S5', 'S6'])
obs = merge([t1, t2])
exp = Table(np.array([[0, 1, 3, 0, 0, 0], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 0, 2, 6], [0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_invalid_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
with self.assertRaisesRegex(ValueError, 'features are present'):
merge([t1, t2], 'error_on_overlapping_feature')
def test_valid_overlapping_sample_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S1', 'S5', 'S6'])
obs = merge([t1, t2], 'error_on_overlapping_feature')
exp = Table(np.array([[0, 1, 3, 0, 0], [1, 1, 2, 0, 0],
[0, 0, 0, 2, 6], [2, 0, 0, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_invalid_overlapping_sample_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S1', 'S5', 'S6'])
with self.assertRaisesRegex(ValueError, 'samples.*S1'):
merge([t1, t2])
def test_invalid_overlap_method(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S1', 'S5', 'S6'])
with self.assertRaisesRegex(ValueError, 'overlap method'):
merge([t1, t2], 'peanut')
def test_sum_full_overlap(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 3, 9], [3, 3, 6]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
def test_sum_triple_overlap(self):
t1 = Table(np.array([[1, 1, 1], [1, 1, 1]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1] * 3, 'sum')
exp = Table(np.array([[3, 3, 3], [3, 3, 3]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
def test_sum_some_overlap(self):
# Did I stutter?
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S2', 'S5'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 3, 3, 0, 6], [1, 1, 2, 0, 0],
[0, 2, 0, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5'])
self.assertEqual(obs, exp)
def test_sum_overlapping_sample_ids(self):
# This should produce the same result as `error_on_overlapping_feature`
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S1', 'S5', 'S6'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 1, 3, 0, 0], [1, 1, 2, 0, 0],
[0, 0, 0, 2, 6], [2, 0, 0, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_sum_overlapping_feature_ids(self):
# This should produce the same result as `error_on_overlapping_sample`
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
obs = merge([t1, t2], 'sum')
exp = Table(np.array([[0, 1, 3, 0, 2, 6], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_average(self):
t1 = Table(np.array([[1, 1, 1], [1, 1, 1]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1] * 3, 'average')
exp = Table(np.array([[1, 1, 1], [1, 1, 1]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
def test_average_relative_frequency(self):
t1 = Table(np.array([[0.75, 0.75, 0.75], [0.75, 0.75, 0.75]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
obs = merge([t1, t2], 'average')
exp = Table(np.array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
self.assertEqual(obs, exp)
class UtilTests(unittest.TestCase):
def test_get_overlapping(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'], ['S1', 'S5', 'S6'])
# samples
obs = _get_overlapping([t1, t2], 'sample')
self.assertEqual(set(['S1']), obs)
# features
obs = _get_overlapping([t1, t2], 'observation')
self.assertEqual(set(['O1']), obs)
def test_get_overlapping_no_overlap(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'], ['S4', 'S5', 'S6'])
# samples
obs = _get_overlapping([t1, t2], 'sample')
self.assertEqual(set(), obs)
# features
obs = _get_overlapping([t1, t2], 'observation')
self.assertEqual(set(), obs)
def test_get_overlapping_multiple(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'], ['S1', 'S5', 'S6'])
t3 = Table(np.array([[3, 3, 1], [0, 2, 1]]),
['O1', 'O2'], ['S1', 'S3', 'S6'])
# samples
obs = _get_overlapping([t1, t2, t3], 'sample')
self.assertEqual({'S1', 'S3', 'S6'}, obs)
# features
obs = _get_overlapping([t1, t2, t3], 'observation')
self.assertEqual({'O1', 'O2'}, obs)
class MergeFeatureDataTests(unittest.TestCase):
def test_merge_single(self):
d = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
obs = _merge_feature_data([d])
pdt.assert_series_equal(obs, d)
def test_valid_overlapping_feature_ids(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGT', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data([d1, d2])
exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
def test_first_feature_data_retained(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGAAA', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data([d1, d2])
exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
# swapping input order changes f1 data
obs = _merge_feature_data([d2, d1])
exp = pd.Series(['ACGAAA', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
def test_multiple_overlapping_feature_ids_order_maintained(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGAAA', 'ACCA'], index=['f1', 'f3'])
d3 = pd.Series(['AGGA', 'ATAT'], index=['f3', 'f4'])
obs = _merge_feature_data([d1, d2, d3])
exp = pd.Series(['ACGT', 'ACCT', 'ACCA', 'ATAT'],
index=['f1', 'f2', 'f3', 'f4'])
pdt.assert_series_equal(obs, exp)
# swapping input order changes f1 and f3
obs = _merge_feature_data([d3, d2, d1])
exp = pd.Series(['ACGAAA', 'ACCT', 'AGGA', 'ATAT'],
index=['f1', 'f2', 'f3', 'f4'])
pdt.assert_series_equal(obs, exp)
def test_valid_non_overlapping_feature_ids(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGT', 'ACCA'], index=['f3', 'f4'])
obs = _merge_feature_data([d1, d2])
exp = pd.Series(['ACGT', 'ACCT', 'ACGT', 'ACCA'],
index=['f1', 'f2', 'f3', 'f4'])
| pdt.assert_series_equal(obs, exp) | pandas.testing.assert_series_equal |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import cudf
from cudf.core._compat import PANDAS_GE_130
from cudf.core.column import ColumnBase
from cudf.core.dtypes import (
CategoricalDtype,
Decimal32Dtype,
Decimal64Dtype,
Decimal128Dtype,
IntervalDtype,
ListDtype,
StructDtype,
)
from cudf.testing._utils import assert_eq
from cudf.utils.dtypes import np_to_pa_dtype
def test_cdt_basic():
psr = pd.Series(["a", "b", "a", "c"], dtype="category")
sr = cudf.Series(["a", "b", "a", "c"], dtype="category")
assert isinstance(sr.dtype, CategoricalDtype)
assert_eq(sr.dtype.categories, psr.dtype.categories)
@pytest.mark.parametrize(
"data", [None, [], ["a"], [1], [1.0], ["a", "b", "c"]]
)
@pytest.mark.parametrize("ordered", [None, False, True])
def test_cdt_eq(data, ordered):
dt = cudf.CategoricalDtype(categories=data, ordered=ordered)
assert dt == "category"
assert dt == dt
assert dt == cudf.CategoricalDtype(categories=None, ordered=ordered)
assert dt == cudf.CategoricalDtype(categories=data, ordered=ordered)
assert not dt == cudf.CategoricalDtype(
categories=data, ordered=not ordered
)
@pytest.mark.parametrize(
"data", [None, [], ["a"], [1], [1.0], ["a", "b", "c"]]
)
@pytest.mark.parametrize("ordered", [None, False, True])
def test_cdf_to_pandas(data, ordered):
assert (
pd.CategoricalDtype(data, ordered)
== cudf.CategoricalDtype(categories=data, ordered=ordered).to_pandas()
)
@pytest.mark.parametrize(
"value_type",
[
int,
"int32",
np.int32,
"datetime64[ms]",
"datetime64[ns]",
"str",
"object",
],
)
def test_list_dtype_pyarrow_round_trip(value_type):
pa_type = pa.list_(cudf.utils.dtypes.np_to_pa_dtype(np.dtype(value_type)))
expect = pa_type
got = ListDtype.from_arrow(expect).to_arrow()
assert expect.equals(got)
def test_list_dtype_eq():
lhs = ListDtype("int32")
rhs = ListDtype("int32")
assert lhs == rhs
rhs = ListDtype("int64")
assert lhs != rhs
def test_list_nested_dtype():
dt = ListDtype(ListDtype("int32"))
expect = ListDtype("int32")
got = dt.element_type
assert expect == got
@pytest.mark.parametrize(
"fields",
[
{},
{"a": "int64"},
{"a": "datetime64[ms]"},
{"a": "int32", "b": "int64"},
],
)
def test_struct_dtype_pyarrow_round_trip(fields):
pa_type = pa.struct(
{
k: cudf.utils.dtypes.np_to_pa_dtype(np.dtype(v))
for k, v in fields.items()
}
)
expect = pa_type
got = StructDtype.from_arrow(expect).to_arrow()
assert expect.equals(got)
def test_struct_dtype_eq():
lhs = StructDtype(
{"a": "int32", "b": StructDtype({"c": "int64", "ab": "int32"})}
)
rhs = StructDtype(
{"a": "int32", "b": StructDtype({"c": "int64", "ab": "int32"})}
)
assert lhs == rhs
rhs = StructDtype({"a": "int32", "b": "int64"})
assert lhs != rhs
lhs = StructDtype({"b": "int64", "a": "int32"})
assert lhs != rhs
@pytest.mark.parametrize(
"fields",
[
{},
{"a": "int32"},
{"a": "object"},
{"a": "str"},
{"a": "datetime64[D]"},
{"a": "int32", "b": "int64"},
{"a": "int32", "b": StructDtype({"a": "int32", "b": "int64"})},
],
)
def test_struct_dtype_fields(fields):
fields = {"a": "int32", "b": StructDtype({"c": "int64", "d": "int32"})}
dt = StructDtype(fields)
assert_eq(dt.fields, fields)
@pytest.mark.parametrize(
"decimal_type",
[cudf.Decimal32Dtype, cudf.Decimal64Dtype, cudf.Decimal128Dtype],
)
def test_decimal_dtype_arrow_roundtrip(decimal_type):
dt = decimal_type(4, 2)
assert dt.to_arrow() == pa.decimal128(4, 2)
assert dt == decimal_type.from_arrow(pa.decimal128(4, 2))
@pytest.mark.parametrize(
"decimal_type,max_precision",
[
(cudf.Decimal32Dtype, 9),
(cudf.Decimal64Dtype, 18),
(cudf.Decimal128Dtype, 38),
],
)
def test_max_precision(decimal_type, max_precision):
decimal_type(scale=0, precision=max_precision)
with pytest.raises(ValueError):
decimal_type(scale=0, precision=max_precision + 1)
@pytest.fixture(params=["int64", "int32"])
def subtype(request):
return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def closed(request):
return request.param
def test_interval_dtype_pyarrow_round_trip(subtype, closed):
pa_array = | pd.core.arrays._arrow_utils.ArrowIntervalType(subtype, closed) | pandas.core.arrays._arrow_utils.ArrowIntervalType |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# linkedin_jog_scraping.py
import os
import pandas as pd
from parsel import Selector
from time import sleep
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
if config.get('HEADLESS', 'headless') == 'Yes':
# install webdrive when needed runing headless
opts=webdriver.ChromeOptions()
opts.headless=True
driver = webdriver.Chrome(ChromeDriverManager().install() ,options=opts)
else:
# install webdrive when needed runing browser
driver = webdriver.Chrome(ChromeDriverManager().install())
print('\nExecuting Linkedin Login...')
# driver.get method() will navigate to a page given by the URL address
driver.get('https://www.linkedin.com/login')
# locate email form by element_by_id
username = driver.find_element_by_id('username')
# send_keys() to simulate key strokes
username.send_keys(config.get('LINKEDIN_LOGIN', 'email'))
# locate password form by_class_name
password = driver.find_element_by_id('password')
# send_keys() to simulate key strokes
password.send_keys(config.get('LINKEDIN_LOGIN', 'password'))
# locate submit button by_class_name
log_in_button = driver.find_element_by_class_name('btn__primary--large')
# locate submit button by_xpath
log_in_button = driver.find_element_by_xpath('//*[@type="submit"]')
log_in_button.click()
print('\nStarting Posting Search...')
# driver goest to the jobs page
driver.get('https://www.linkedin.com/jobs/')
sleep(2)
# Start search term
search_job = driver.find_element_by_xpath('//*[@type="text"]')
search_job.send_keys(config.get('LINKEDIN_LOGIN', 'search_term'))
sleep(1)
#search.send_keys(Keys.RETURN)
# location
search_location = driver.find_element_by_xpath('//input[starts-with(@id,"jobs-search-box-location")]')
search_location.send_keys(Keys.COMMAND, 'a') #COMMAND is the mac keyboard control
search_location.send_keys(Keys.BACKSPACE)
search_location.send_keys(config.get('LINKEDIN_LOGIN', 'country'))
search_location.send_keys(Keys.RETURN)
sleep(3)
# Gets the URL from the search result
linkedin_result = driver.current_url
# Scroll job list to the end of first page
recentList = driver.find_elements_by_class_name('jobs-search-results__list-item')
for job_item in recentList:
driver.execute_script("arguments[0].scrollIntoView();", job_item)
sleep(0.1)
# Get full list of positions name
position_name = driver.find_elements_by_class_name('job-card-list__title')
position_name = [url.text for url in position_name]
position_name
len(position_name)
# Get listing Company Name
company_name = driver.find_elements_by_css_selector('.job-card-container__company-name')
company_name = [url.text for url in company_name]
company_name
len(company_name)
# Get listing location
job_location = driver.find_elements_by_xpath('//div[starts-with(@class,"artdeco-entity-lockup__caption")]')
job_location = [url.text for url in job_location]
job_location
len(job_location)
# Get full list of links positions
position_link = driver.find_elements_by_css_selector("div.artdeco-entity-lockup__title > a")
position_link = [link.get_attribute("href") for link in position_link]
position_link
len(position_link)
urls_linkedin = []
for lin in position_link:
terminator = lin.index('?')
urls_linkedin.append(lin[:terminator])
if os.path.isfile('opportunities.csv') is True:
opportunities = pd.read_csv('opportunities.csv')
else:
dict = {'Job Title': [], 'Company Name': [], 'Location': [], 'Direct URL': [], 'TrimmedLinkedin' : [],'LinkedinLink': []}
df = | pd.DataFrame(dict) | pandas.DataFrame |
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.toolkit.topology import Molecule, Topology
from biopandas.pdb import PandasPdb
import matplotlib.pyplot as plt
from operator import itemgetter
from mendeleev import element
from simtk.openmm import app
from scipy import optimize
import subprocess as sp
from sys import stdout
import pandas as pd
import numpy as np
import statistics
import itertools
import parmed
import pickle
import shutil
import simtk
import scipy
import time
import math
import sys
import ast
import re
import os
BOHRS_PER_ANGSTROM = 0.529
HARTREE_PER_KCAL_MOL = 627.509391
#kcal/mol * A^2 to kJ/mol * nm^2
KCAL_MOL_PER_KJ_MOL = 4.184
ANGSTROMS_PER_NM = 10.0
RADIANS_PER_DEGREE = np.pi / 180.0
method_basis_scale_dict = {
"HF STO-3G": 0.817,
"HF 3-21G": 0.906,
"HF 3-21G*": 0.903,
"HF 6-31G": 0.903,
"HF 6-31G*": 0.899,
"HF 6-31G**": 0.903,
"HF 6-31+G**": 0.904,
"HF 6-311G*": 0.904,
"HF 6-311G**": 0.909,
"HF TZVP": 0.909,
"HF cc-pVDZ": 0.908,
"HF cc-pVTZ": 0.91,
"HF cc-pVQZ": 0.908,
"HF aug-cc-pVDZ": 0.911,
"HF aug-cc-pVTZ": 0.91,
"HF aug-cc-pVQZ": 0.909,
"HF daug-cc-pVDZ": 0.912,
"HF daug-cc-pVTZ": 0.905,
"ROHF 3-21G": 0.907,
"ROHF 3-21G*": 0.909,
"ROHF 6-31G": 0.895,
"ROHF 6-31G*": 0.89,
"ROHF 6-31G**": 0.855,
"ROHF 6-31+G**": 0.856,
"ROHF 6-311G*": 0.856,
"ROHF 6-311G**": 0.913,
"ROHF cc-pVDZ": 0.861,
"ROHF cc-pVTZ": 0.901,
"LSDA STO-3G": 0.896,
"LSDA 3-21G": 0.984,
"LSDA 3-21G*": 0.982,
"LSDA 6-31G": 0.98,
"LSDA 6-31G*": 0.981,
"LSDA 6-31G**": 0.981,
"LSDA 6-31+G**": 0.985,
"LSDA 6-311G*": 0.984,
"LSDA 6-311G**": 0.988,
"LSDA TZVP": 0.988,
"LSDA cc-pVDZ": 0.989,
"LSDA cc-pVTZ": 0.989,
"LSDA aug-cc-pVDZ": 0.989,
"LSDA aug-cc-pVTZ": 0.991,
"BLYP STO-3G": 0.925,
"BLYP 3-21G": 0.995,
"BLYP 3-21G*": 0.994,
"BLYP 6-31G": 0.992,
"BLYP 6-31G*": 0.992,
"BLYP 6-31G**": 0.992,
"BLYP 6-31+G**": 0.995,
"BLYP 6-311G*": 0.998,
"BLYP 6-311G**": 0.996,
"BLYP TZVP": 0.998,
"BLYP cc-pVDZ": 1.002,
"BLYP cc-pVTZ": 0.997,
"BLYP aug-cc-pVDZ": 0.998,
"BLYP aug-cc-pVTZ": 0.997,
"B1B95 STO-3G": 0.883,
"B1B95 3-21G": 0.957,
"B1B95 3-21G*": 0.955,
"B1B95 6-31G": 0.954,
"B1B95 6-31G*": 0.949,
"B1B95 6-31G**": 0.955,
"B1B95 6-31+G**": 0.957,
"B1B95 6-311G*": 0.959,
"B1B95 6-311G**": 0.96,
"B1B95 TZVP": 0.957,
"B1B95 cc-pVDZ": 0.961,
"B1B95 cc-pVTZ": 0.957,
"B1B95 aug-cc-pVDZ": 0.958,
"B1B95 aug-cc-pVTZ": 0.959,
"B3LYP STO-3G": 0.892,
"B3LYP 3-21G": 0.965,
"B3LYP 3-21G*": 0.962,
"B3LYP 6-31G": 0.962,
"B3LYP 6-31G*": 0.96,
"B3LYP 6-31G**": 0.961,
"B3LYP 6-31+G**": 0.964,
"B3LYP 6-311G*": 0.966,
"B3LYP 6-311G**": 0.967,
"B3LYP TZVP": 0.965,
"B3LYP cc-pVDZ": 0.97,
"B3LYP cc-pVTZ": 0.967,
"B3LYP cc-pVQZ": 0.969,
"B3LYP aug-cc-pVDZ": 0.97,
"B3LYP aug-cc-pVTZ": 0.968,
"B3LYP aug-cc-pVQZ": 0.969,
"B3PW91 STO-3G": 0.885,
"B3PW91 3-21G": 0.961,
"B3PW91 3-21G*": 0.959,
"B3PW91 6-31G": 0.958,
"B3PW91 6-31G*": 0.957,
"B3PW91 6-31G**": 0.958,
"B3PW91 6-31+G**": 0.96,
"B3PW91 6-311G*": 0.963,
"B3PW91 6-311G**": 0.963,
"B3PW91 TZVP": 0.964,
"B3PW91 cc-pVDZ": 0.965,
"B3PW91 cc-pVTZ": 0.962,
"B3PW91 aug-cc-pVDZ": 0.965,
"B3PW91 aug-cc-pVTZ": 0.965,
"mPW1PW91 STO-3G": 0.879,
"mPW1PW91 3-21G": 0.955,
"mPW1PW91 3-21G*": 0.95,
"mPW1PW91 6-31G": 0.947,
"mPW1PW91 6-31G*": 0.948,
"mPW1PW91 6-31G**": 0.952,
"mPW1PW91 6-31+G**": 0.952,
"mPW1PW91 6-311G*": 0.954,
"mPW1PW91 6-311G**": 0.957,
"mPW1PW91 TZVP": 0.954,
"mPW1PW91 cc-pVDZ": 0.958,
"mPW1PW91 cc-pVTZ": 0.959,
"mPW1PW91 aug-cc-pVDZ": 0.958,
"mPW1PW91 aug-cc-pVTZ": 0.958,
"PBEPBE STO-3G": 0.914,
"PBEPBE 3-21G": 0.991,
"PBEPBE 3-21G*": 0.954,
"PBEPBE 6-31G": 0.986,
"PBEPBE 6-31G*": 0.986,
"PBEPBE 6-31G**": 0.986,
"PBEPBE 6-31+G**": 0.989,
"PBEPBE 6-311G*": 0.99,
"PBEPBE 6-311G**": 0.991,
"PBEPBE TZVP": 0.989,
"PBEPBE cc-pVDZ": 0.994,
"PBEPBE cc-pVTZ": 0.993,
"PBEPBE aug-cc-pVDZ": 0.994,
"PBEPBE aug-cc-pVTZ": 0.994,
"PBE1PBE STO-3G": 0.882,
"PBE1PBE 3-21G": 0.96,
"PBE1PBE 3-21G*": 0.96,
"PBE1PBE 6-31G": 0.956,
"PBE1PBE 6-31G*": 0.95,
"PBE1PBE 6-31G**": 0.953,
"PBE1PBE 6-31+G**": 0.955,
"PBE1PBE 6-311G*": 0.959,
"PBE1PBE 6-311G**": 0.959,
"PBE1PBE TZVP": 0.96,
"PBE1PBE cc-pVDZ": 0.962,
"PBE1PBE cc-pVTZ": 0.961,
"PBE1PBE aug-cc-pVDZ": 0.962,
"PBE1PBE aug-cc-pVTZ": 0.962,
"HSEh1PBE STO-3G": 0.883,
"HSEh1PBE 3-21G": 0.963,
"HSEh1PBE 3-21G*": 0.96,
"HSEh1PBE 6-31G": 0.957,
"HSEh1PBE 6-31G*": 0.951,
"HSEh1PBE 6-31G**": 0.954,
"HSEh1PBE 6-31+G**": 0.955,
"HSEh1PBE 6-311G*": 0.96,
"HSEh1PBE 6-311G**": 0.96,
"HSEh1PBE TZVP": 0.96,
"HSEh1PBE cc-pVDZ": 0.962,
"HSEh1PBE cc-pVTZ": 0.961,
"HSEh1PBE aug-cc-pVDZ": 0.962,
"HSEh1PBE aug-cc-pVTZ": 0.962,
"TPSSh 3-21G": 0.969,
"TPSSh 3-21G*": 0.966,
"TPSSh 6-31G": 0.962,
"TPSSh 6-31G*": 0.959,
"TPSSh 6-31G**": 0.959,
"TPSSh 6-31+G**": 0.963,
"TPSSh 6-311G*": 0.963,
"TPSSh TZVP": 0.964,
"TPSSh cc-pVDZ": 0.972,
"TPSSh cc-pVTZ": 0.968,
"TPSSh aug-cc-pVDZ": 0.967,
"TPSSh aug-cc-pVTZ": 0.965,
"B97D3 3-21G": 0.983,
"B97D3 6-31G*": 0.98,
"B97D3 6-31+G**": 0.983,
"B97D3 6-311G**": 0.986,
"B97D3 TZVP": 0.986,
"B97D3 cc-pVDZ": 0.992,
"B97D3 cc-pVTZ": 0.986,
"B97D3 aug-cc-pVTZ": 0.985,
"MP2 STO-3G": 0.872,
"MP2 3-21G": 0.955,
"MP2 3-21G*": 0.951,
"MP2 6-31G": 0.957,
"MP2 6-31G*": 0.943,
"MP2 6-31G**": 0.937,
"MP2 6-31+G**": 0.941,
"MP2 6-311G*": 0.95,
"MP2 6-311G**": 0.95,
"MP2 TZVP": 0.948,
"MP2 cc-pVDZ": 0.953,
"MP2 cc-pVTZ": 0.95,
"MP2 cc-pVQZ": 0.948,
"MP2 aug-cc-pVDZ": 0.959,
"MP2 aug-cc-pVTZ": 0.953,
"MP2 aug-cc-pVQZ": 0.95,
"MP2=FULL STO-3G": 0.889,
"MP2=FULL 3-21G": 0.955,
"MP2=FULL 3-21G*": 0.948,
"MP2=FULL 6-31G": 0.95,
"MP2=FULL 6-31G*": 0.942,
"MP2=FULL 6-31G**": 0.934,
"MP2=FULL 6-31+G**": 0.939,
"MP2=FULL 6-311G*": 0.947,
"MP2=FULL 6-311G**": 0.949,
"MP2=FULL TZVP": 0.953,
"MP2=FULL cc-pVDZ": 0.95,
"MP2=FULL cc-pVTZ": 0.949,
"MP2=FULL cc-pVQZ": 0.957,
"MP2=FULL aug-cc-pVDZ": 0.969,
"MP2=FULL aug-cc-pVTZ": 0.951,
"MP2=FULL aug-cc-pVQZ": 0.956,
"MP3 STO-3G": 0.894,
"MP3 3-21G": 0.968,
"MP3 3-21G*": 0.965,
"MP3 6-31G": 0.966,
"MP3 6-31G*": 0.939,
"MP3 6-31G**": 0.935,
"MP3 6-31+G**": 0.931,
"MP3 TZVP": 0.935,
"MP3 cc-pVDZ": 0.948,
"MP3 cc-pVTZ": 0.945,
"MP3=FULL 6-31G*": 0.938,
"MP3=FULL 6-31+G**": 0.932,
"MP3=FULL TZVP": 0.934,
"MP3=FULL cc-pVDZ": 0.94,
"MP3=FULL cc-pVTZ": 0.933,
"B2PLYP 6-31G*": 0.949,
"B2PLYP 6-31+G**": 0.952,
"B2PLYP TZVP": 0.954,
"B2PLYP cc-pVDZ": 0.958,
"B2PLYP cc-pVTZ": 0.959,
"B2PLYP cc-pVQZ": 0.957,
"B2PLYP aug-cc-pVTZ": 0.961,
"B2PLYP=FULL 3-21G": 0.952,
"B2PLYP=FULL 6-31G*": 0.948,
"B2PLYP=FULL 6-31+G**": 0.951,
"B2PLYP=FULL TZVP": 0.954,
"B2PLYP=FULL cc-pVDZ": 0.959,
"B2PLYP=FULL cc-pVTZ": 0.956,
"B2PLYP=FULL aug-cc-pVDZ": 0.962,
"B2PLYP=FULL aug-cc-pVTZ": 0.959,
"CID 3-21G": 0.932,
"CID 3-21G*": 0.931,
"CID 6-31G": 0.935,
"CID 6-31G*": 0.924,
"CID 6-31G**": 0.924,
"CID 6-31+G**": 0.924,
"CID 6-311G*": 0.929,
"CID cc-pVDZ": 0.924,
"CID cc-pVTZ": 0.927,
"CISD 3-21G": 0.941,
"CISD 3-21G*": 0.934,
"CISD 6-31G": 0.938,
"CISD 6-31G*": 0.926,
"CISD 6-31G**": 0.918,
"CISD 6-31+G**": 0.922,
"CISD 6-311G*": 0.925,
"CISD cc-pVDZ": 0.922,
"CISD cc-pVTZ": 0.93,
"QCISD 3-21G": 0.969,
"QCISD 3-21G*": 0.961,
"QCISD 6-31G": 0.964,
"QCISD 6-31G*": 0.952,
"QCISD 6-31G**": 0.941,
"QCISD 6-31+G**": 0.945,
"QCISD 6-311G*": 0.957,
"QCISD 6-311G**": 0.954,
"QCISD TZVP": 0.955,
"QCISD cc-pVDZ": 0.959,
"QCISD cc-pVTZ": 0.956,
"QCISD aug-cc-pVDZ": 0.969,
"QCISD aug-cc-pVTZ": 0.962,
"CCD 3-21G": 0.972,
"CCD 3-21G*": 0.957,
"CCD 6-31G": 0.96,
"CCD 6-31G*": 0.947,
"CCD 6-31G**": 0.938,
"CCD 6-31+G**": 0.942,
"CCD 6-311G*": 0.955,
"CCD 6-311G**": 0.955,
"CCD TZVP": 0.948,
"CCD cc-pVDZ": 0.957,
"CCD cc-pVTZ": 0.934,
"CCD aug-cc-pVDZ": 0.965,
"CCD aug-cc-pVTZ": 0.957,
"CCSD 3-21G": 0.943,
"CCSD 3-21G*": 0.943,
"CCSD 6-31G": 0.943,
"CCSD 6-31G*": 0.944,
"CCSD 6-31G**": 0.933,
"CCSD 6-31+G**": 0.934,
"CCSD 6-311G*": 0.954,
"CCSD TZVP": 0.954,
"CCSD cc-pVDZ": 0.947,
"CCSD cc-pVTZ": 0.941,
"CCSD cc-pVQZ": 0.951,
"CCSD aug-cc-pVDZ": 0.963,
"CCSD aug-cc-pVTZ": 0.956,
"CCSD aug-cc-pVQZ": 0.953,
"CCSD=FULL 6-31G*": 0.95,
"CCSD=FULL TZVP": 0.948,
"CCSD=FULL cc-pVTZ": 0.948,
"CCSD=FULL aug-cc-pVTZ": 0.951,
}
element_list = [
["1 ", "H ", "Hydrogen"],
["2 ", "He", "Helium"],
["3 ", "Li", "Lithium"],
["4 ", "Be", "Beryllium"],
["5 ", "B ", "Boron"],
["6 ", "C ", "Carbon"],
["7 ", "N ", "Nitrogen"],
["8 ", "O ", "Oxygen"],
["9 ", "F ", "Fluorine"],
["10", "Ne", "Neon"],
["11", "Na", "Sodium"],
["12", "Mg", "Magnesium"],
["13", "Al", "Aluminum"],
["14", "Si", "Silicon"],
["15", "P ", "Phosphorus"],
["16", "S ", "Sulfur"],
["17", "Cl", "Chlorine"],
["18", "Ar", "Argon"],
["19", "K ", "Potassium"],
["20", "Ca", "Calcium"],
["21", "Sc", "Scandium"],
["22", "Ti", "Titanium"],
["23", "V ", "Vanadium"],
["24", "Cr", "Chromium"],
["25", "Mn", "Manganese"],
["26", "Fe", "Iron"],
["27", "Co", "Cobalt"],
["28", "Ni", "Nickel"],
["29", "Cu", "Copper"],
["30", "Zn", "Zinc"],
["31", "Ga", "Gallium"],
["32", "Ge", "Germanium"],
["33", "As", "Arsenic"],
["34", "Se", "Selenium"],
["35", "Br", "Bromine"],
["36", "Kr", "Krypton"],
["37", "Rb", "Rubidium"],
["38", "Sr", "Strontium"],
["39", "Y ", "Yttrium"],
["40", "Zr", "Zirconium"],
["41", "Nb", "Niobium"],
["42", "Mo", "Molybdenum"],
["43", "Tc", "Technetium"],
["44", "Ru", "Ruthenium"],
["45", "Rh", "Rhodium"],
["46", "Pd", "Palladium"],
["47", "Ag", "Silver"],
["48", "Cd", "Cadmium"],
["49", "In", "Indium"],
["50", "Sn", "Tin"],
["51", "Sb", "Antimony"],
["52", "Te", "Tellurium"],
["53", "I ", "Iodine"],
["54", "Xe", "Xenon"],
["55", "Cs", "Cesium"],
["56", "Ba", "Barium"],
["57", "La", "Lanthanum"],
["58", "Ce", "Cerium"],
["59", "Pr", "Praseodymium"],
["60", "Nd", "Neodymium"],
["61", "Pm", "Promethium"],
["62", "Sm", "Samarium"],
["63", "Eu", "Europium"],
["64", "Gd", "Gadolinium"],
["65", "Tb", "Terbium"],
["66", "Dy", "Dysprosium"],
["67", "Ho", "Holmium"],
["68", "Er", "Erbium"],
["69", "Tm", "Thulium"],
["70", "Yb", "Ytterbium"],
["71", "Lu", "Lutetium"],
["72", "Hf", "Hafnium"],
["73", "Ta", "Tantalum"],
["74", "W ", "Tungsten"],
["75", "Re", "Rhenium"],
["76", "Os", "Osmium"],
["77", "Ir", "Iridium"],
["78", "Pt", "Platinum"],
["79", "Au", "Gold"],
["80", "Hg", "Mercury"],
["81", "Tl", "Thallium"],
["82", "Pb", "Lead"],
["83", "Bi", "Bismuth"],
["84", "Po", "Polonium"],
["85", "At", "Astatine"],
["86", "Rn", "Radon"],
["87", "Fr", "Francium"],
["88", "Ra", "Radium"],
["89", "Ac", "Actinium"],
["90", "Th", "Thorium"],
["91", "Pa", "Protactinium"],
["92", "U ", "Uranium"],
["93", "Np", "Neptunium"],
["94", "Pu", "Plutonium"],
["95", "Am", "Americium"],
["96", "Cm", "Curium"],
["97", "Bk", "Berkelium"],
["98", "Cf", "Californium"],
["99", "Es", "Einsteinium"],
]
def get_vibrational_scaling(functional, basis_set):
"""
Returns vibrational scaling factor given the functional
and the basis set for the QM engine.
Parameters
----------
functional: str
Functional
basis_set: str
Basis set
Returns
-------
vib_scale: float
Vibrational scaling factor corresponding to the given
basis_set and the functional.
Examples
--------
>>> get_vibrational_scaling("QCISD", "6-311G*")
0.957
"""
vib_scale = method_basis_scale_dict.get(functional + " " + basis_set)
return vib_scale
def unit_vector_N(u_BC, u_AB):
"""
Calculates unit normal vector perpendicular to plane ABC.
Parameters
----------
u_BC : (.. , 1, 3) array
Unit vector from atom B to atom C.
u_AB : (..., 1, 3) array
Unit vector from atom A to atom B.
Returns
-------
u_N : (..., 1, 3) array
Unit normal vector perpendicular to plane ABC.
Examples
--------
>>> u_BC = [0.34040355, 0.62192853, 0.27011169]
>>> u_AB = [0.28276792, 0.34232697, 0.02370306]
>>> unit_vector_N(u_BC, u_AB)
array([-0.65161629, 0.5726879 , -0.49741811])
"""
cross_product = np.cross(u_BC, u_AB)
norm_u_N = np.linalg.norm(cross_product)
u_N = cross_product / norm_u_N
return u_N
def delete_guest_angle_params(guest_qm_params_file="guest_qm_params.txt"):
"""
"""
f_params = open(guest_qm_params_file, "r")
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
lines_selected = lines_params[:to_begin] + lines_params[to_end + 1 :]
with open(guest_qm_params_file, "w") as f_:
f_.write("".join(lines_selected))
return
def remove_bad_angle_params(
guest_qm_params_file="guest_qm_params.txt", angle=1.00, k_angle=500):
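"""
Filters out badly fitted angle parameters from the guest QM parameter
file: any line in the angle-parameter block whose first numeric field is
below `angle` or whose second numeric field is above `k_angle` is removed.
"""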
with open(guest_qm_params_file, "r") as f_params:
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
lines_to_omit = []
for i in angle_params:
if float(re.findall(r"[-+]?\d+[.]?\d*", i)[0]) < float(angle) or float(
re.findall(r"[-+]?\d+[.]?\d*", i)[1]
) > float(k_angle):
lines_to_omit.append(i)
for b in lines_to_omit:
lines_params.remove(b)
with open(guest_qm_params_file, "w") as file:
for j in lines_params:
file.write(j)
def get_num_host_atoms(host_pdb):
"""
Reads the host PDB file and returns the
total number of atoms.
"""
ppdb = PandasPdb()
ppdb.read_pdb(host_pdb)
no_host_atoms = ppdb.df["ATOM"].shape[0]
return no_host_atoms
def change_names(inpcrd_file, prmtop_file, pdb_file):
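"""
Copies the given inpcrd, prmtop and pdb files to generically named
system_qmmmrebind.* files in the working directory.
"""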
command = "cp -r " + inpcrd_file + " system_qmmmrebind.inpcrd"
os.system(command)
command = "cp -r " + prmtop_file + " system_qmmmrebind.prmtop"
os.system(command)
command = "cp -r " + pdb_file + " system_qmmmrebind.pdb"
os.system(command)
def copy_file(source, destination):
"""
Copies a file from a source to the destination.
"""
shutil.copy(source, destination)
def get_openmm_energies(system_pdb, system_xml):
"""
Returns decomposed OPENMM energies for the
system.
Parameters
----------
system_pdb : str
Input PDB file
system_xml : str
Forcefield file in XML format
"""
pdb = simtk.openmm.app.PDBFile(system_pdb)
ff_xml_file = open(system_xml, "r")
system = simtk.openmm.XmlSerializer.deserialize(ff_xml_file.read())
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
state = simulation.context.getState(
getEnergy=True, getParameters=True, getForces=True
)
force_group = []
for i, force in enumerate(system.getForces()):
force_group.append(force.__class__.__name__)
forcegroups = {}
for i in range(system.getNumForces()):
force = system.getForce(i)
force.setForceGroup(i)
forcegroups[force] = i
energies = {}
for f, i in forcegroups.items():
energies[f] = (
simulation.context.getState(getEnergy=True, groups=2 ** i)
.getPotentialEnergy()
._value
)
decomposed_energy = []
for key, val in energies.items():
decomposed_energy.append(val)
df_energy_openmm = pd.DataFrame(
list(zip(force_group, decomposed_energy)),
columns=["Energy_term", "Energy_openmm_params"],
)
energy_values = [
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicBondForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicAngleForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "PeriodicTorsionForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "NonbondedForce"
].values[0]
)[1],
]
energy_group = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_open_mm = pd.DataFrame(
list(zip(energy_group, energy_values)),
columns=["Energy_term", "Energy_openmm_params"],
)
df_energy_open_mm = df_energy_open_mm.set_index("Energy_term")
print(df_energy_open_mm)
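# Example usage (sketch, comment only; the file names are placeholders):
# get_openmm_energies("guest_init_II.pdb", "guest_init_II.xml")
# This prints a dataframe indexed by Energy_term with the HarmonicBondForce,
# HarmonicAngleForce, PeriodicTorsionForce and NonbondedForce contributions
# (OpenMM reports them in kJ/mol).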
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array which contains the coordinates of all
the N atoms.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def force_angle_constant(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
Calculates force constant according to Equation 14 of
Seminario calculation paper; returns angle (in kcal/mol/rad^2)
and equilibrium angle (in degrees).
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y and Z
coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
k_theta : float
Force angle constant calculated using modified
seminario method.
theta_0 : float
Equilibrium angle between AB and BC (in degrees).
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
# Normal vector to angle plane found
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_u_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_u_PA
u_PC = np.cross(u_CB, u_N)
norm_u_PC = np.linalg.norm(u_PC)
u_PC = u_PC / norm_u_PC
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Scaling due to additional angles - Modified Seminario Part
sum_first = sum_first / scaling_1
sum_second = sum_second / scaling_2
# Added as two springs in series
k_theta = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta = 1 / k_theta
k_theta = -k_theta # Change to OPLS form
k_theta = abs(k_theta * 0.5) # Change to OPLS form
# Equilibrium Angle
theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
# If the vectors u_CB and u_AB are linearly dependent u_N cannot be defined.
# This case is dealt with here :
if abs(sum((u_CB) - (u_AB))) < 0.01 or (
abs(sum((u_CB) - (u_AB))) > 1.99 and abs(sum((u_CB) - (u_AB))) < 2.01
):
scaling_1 = 1
scaling_2 = 1
[k_theta, theta_0] = force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
)
return k_theta, theta_0
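# Example call (sketch, comment only; the arrays are only meant to illustrate
# the expected shapes, not real Hessian data):
# N = coords.shape[0]
# bond_lengths: (N, N), eigenvalues: (N, N, 3), eigenvectors: (3, 3, N, N)
# k_theta, theta_0 = force_angle_constant(
#     0, 1, 2, bond_lengths, eigenvalues, eigenvectors, coords, 1.0, 1.0)
# k_theta is returned in kcal/mol/rad^2 and theta_0 in degrees.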
def dot_product(u_PA, eig_AB):
"""
Returns the dot product of two vectors.
Parameters
----------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the
plane of A, B, C.
eig_AB : (..., 3, 3) array
Eigenvectors of the hessian matrix for
the bond AB.
"""
x = 0
for i in range(0, 3):
x = x + u_PA[i] * eig_AB[i].conjugate()
return x
def force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
Calculates force constant according to Equation 14
of Seminario calculation paper when the vectors
u_CB and u_AB are linearly dependent and u_N cannot
be defined. It instead takes samples of u_N across a
unit sphere for the calculation; returns angle
(in kcal/mol/rad^2) and equilibrium angle in degrees.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y,
and Z coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
k_theta : float
Force angle constant calculated using modified
seminario method.
theta_0 : float
Equilibrium angle between AB and BC (in degrees).
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
k_theta_array = np.zeros((180, 360))
# Find force constant with varying u_N (with vector uniformly
# sampled across a sphere)
for theta in range(0, 180):
for phi in range(0, 360):
r = 1
u_N = [
r
* math.sin(math.radians(theta))
* math.cos(math.radians(phi)),
r
* math.sin(math.radians(theta))
* math.sin(math.radians(phi)),
r * math.cos(math.radians(theta)),
]
u_PA = np.cross(u_N, u_AB)
u_PA = u_PA / np.linalg.norm(u_PA)
u_PC = np.cross(u_CB, u_N)
u_PC = u_PC / np.linalg.norm(u_PC)
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Added as two springs in series
k_theta_ij = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta_ij = 1 / k_theta_ij
k_theta_ij = -k_theta_ij # Change to OPLS form
k_theta_ij = abs(k_theta_ij * 0.5) # Change to OPLS form
k_theta_array[theta, phi] = k_theta_ij
# Removes cases where u_N was linearly dependent of u_CB or u_AB.
# Force constant used is taken as the mean.
k_theta = np.mean(np.mean(k_theta_array))
# Equilibrium Angle independent of u_N
theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
return k_theta, theta_0
def force_constant_bond(atom_A, atom_B, eigenvalues, eigenvectors, coords):
"""
Calculates the bond force constant for the bonds in the
molecule according to equation 10 of seminario paper,
given the bond atoms' indices and the corresponding
eigenvalues, eigenvectors and coordinates matrices.
Parameters
----------
atom_A : int
Index of Atom A.
atom_B : int
Index of Atom B.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing eigenvalues
of the hessian matrix, where N is the total number
of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing the
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y, and
Z coordinates of all N atoms.
Returns
--------
k_AB : float
Bond Force Constant value for the bond with atoms A and B.
"""
# Eigenvalues and eigenvectors calculated
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[:, :, atom_A, atom_B]
# Vector along bond
diff_AB = np.array(coords[atom_B, :]) - np.array(coords[atom_A, :])
norm_diff_AB = np.linalg.norm(diff_AB)
unit_vectors_AB = diff_AB / norm_diff_AB
k_AB = 0
# Projections of eigenvalues
for i in range(0, 3):
dot_product = abs(np.dot(unit_vectors_AB, eigenvectors_AB[:, i]))
k_AB = k_AB + (eigenvalues_AB[i] * dot_product)
k_AB = -k_AB * 0.5 # Convert to OPLS form
return k_AB
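# Example call (sketch, comment only; same array shapes as noted for
# force_angle_constant above):
# k_AB = force_constant_bond(0, 1, eigenvalues, eigenvectors, coords)
# k_AB is the OPLS-form force constant for the bond between atoms 0 and 1.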
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array containing the coordinates of all the N atoms.
Returns
-------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the plane of A, B, C.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def reverse_list(lst):
"""
Returns the reversed form of a given list.
Parameters
----------
lst : list
Input list.
Returns
-------
reversed_list : list
Reversed input list.
Examples
--------
>>> lst = [5, 4, 7, 2]
>>> reverse_list(lst)
[2, 7, 4, 5]
"""
reversed_list = lst[::-1]
return reversed_list
def uniq(input_):
"""
Returns a list with only unique elements from a list
containing duplicate / repeating elements.
Parameters
----------
input_ : list
Input list.
Returns
-------
output : list
List with only unique elements.
Examples
--------
>>> lst = [2, 4, 2, 9, 10, 35, 10]
>>> uniq(lst)
[2, 4, 9, 10, 35]
"""
output = []
for x in input_:
if x not in output:
output.append(x)
return output
def search_in_file(file: str, word: str) -> list:
"""
Search for the given string in file and return lines
containing that string along with line numbers.
Parameters
----------
file : str
Input file.
word : str
Search word.
Returns
-------
list_of_results : list
List of lists with each element representing the
line number and the line contents.
"""
line_number = 0
list_of_results = []
with open(file, "r") as f:
for line in f:
line_number += 1
if word in line:
list_of_results.append((line_number, line.rstrip()))
return list_of_results
def list_to_dict(lst):
"""
Converts an input list with mapped characters (every
odd entry is the key of the dictionary and every
even entry adjacent to the odd entry is its corresponding
value) to a dictionary.
Parameters
----------
lst : list
Input list.
Returns
-------
res_dct : dict
A dictionary with every element mapped with
its successive element starting from index 0.
Examples
--------
>>> lst = [5, 9, 3, 6, 2, 7]
>>> list_to_dict(lst)
{5: 9, 3: 6, 2: 7}
"""
res_dct = {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
return res_dct
def scale_list(list_):
"""
Returns a scaled list with the minimum value
subtracted from each element of the corresponding list.
Parameters
----------
list_ : list
Input list.
Returns
-------
scaled_list : list
Scaled list.
Examples
--------
>>> list_ = [6, 3, 5, 11, 3, 2, 8, 6]
>>> scale_list(list_)
[4, 1, 3, 9, 1, 0, 6, 4]
"""
scaled_list = [i - min(list_) for i in list_]
return scaled_list
def list_kJ_kcal(list_):
"""
Convert the elements in the list from
kiloJoules units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of kJ.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_kJ_kcal(list_)
[1.4340344168260037, 0.7170172084130019, 1.1950286806883366]
"""
converted_list = [i / 4.184 for i in list_]
return converted_list
def list_hartree_kcal(list_):
"""
Convert the elements in the list from
hartree units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of hartree.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_hartree_kcal(list_)
[3765.0564000000004, 1882.5282000000002, 3137.547]
"""
converted_list = [i * 627.5094 for i in list_]
return converted_list
def torsiondrive_input_to_xyz(psi_input_file, xyz_file):
"""
Returns an xyz file from a torsiondrive formatted
input file.
Parameters
----------
psi_input_file : str
Input file for the psi4 QM engine.
xyz_file : str
XYZ format file to write the coords of the system.
"""
with open(psi_input_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "molecule {" in lines[i]:
to_begin = int(i)
if "set {" in lines[i]:
to_end = int(i)
xyz_lines = lines[to_begin + 2 : to_end - 1]
with open(xyz_file, "w") as f:
f.write(str(len(xyz_lines)) + "\n")
f.write(xyz_file + "\n")
for i in xyz_lines:
f.write(i)
def xyz_to_pdb(xyz_file, coords_file, template_pdb, system_pdb):
"""
Converts a XYZ file to a PDB file.
Parameters
----------
xyz_file : str
XYZ file containing the coordinates of the system.
coords_file : str
A text file containing the coordinates part of XYZ file.
template_pdb : str
A pdb file to be used as a template for the required PDB.
system_pdb : str
Output PDB file with the coordinates updated in the
template pdb using XYZ file.
"""
with open(xyz_file, "r") as f:
lines = f.readlines()
needed_lines = lines[2:]
with open(coords_file, "w") as f:
for i in needed_lines:
f.write(i)
df = pd.read_csv(coords_file, header=None, delimiter=r"\s+")
df.columns = ["atom", "x", "y", "z"]
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = df["x"]
ppdb.df["ATOM"]["y_coord"] = df["y"]
ppdb.df["ATOM"]["z_coord"] = df["z"]
ppdb.to_pdb(system_pdb)
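# Example usage (sketch, comment only; the file names are placeholders):
# xyz_to_pdb("torsion_scan.xyz", "torsion_scan_coords.txt",
#            "guest_init.pdb", "guest_from_xyz.pdb")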
def generate_xml_from_pdb_sdf(system_pdb, system_sdf, system_xml):
"""
Generates an openforcefield xml file from the pdb file.
Parameters
----------
system_pdb : str
Input PDB file.
system_sdf : str
SDF file of the system.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_sdf
os.system(command)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
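# Example usage (sketch, comment only; the file names are placeholders and
# obabel must be available on the PATH):
# generate_xml_from_pdb_sdf("guest_init.pdb", "guest_init.sdf", "guest_init.xml")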
def generate_xml_from_charged_pdb_sdf(
system_pdb,
system_init_sdf,
system_sdf,
num_charge_atoms,
index_charge_atom_1,
charge_atom_1,
system_xml,
):
"""
Generates an openforcefield xml file from the pdb
file via SDF file and openforcefield.
Parameters
----------
system_pdb : str
Input PDB file.
system_init_sdf : str
SDF file for the system excluding charge information.
system_sdf : str
SDF file of the system.
num_charge_atoms : int
Total number of charged atoms in the PDB.
index_charge_atom_1 : int
Index of the first charged atom.
charge_atom_1 : float
Charge on first charged atom.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_init_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_init_sdf
os.system(command)
with open(system_init_sdf, "r") as f1:
filedata = f1.readlines()
filedata = filedata[:-2]
with open(system_sdf, "w+") as out:
for i in filedata:
out.write(i)
line_1 = (
"M CHG "
+ str(num_charge_atoms)
+ " "
+ str(index_charge_atom_1)
+ " "
+ str(charge_atom_1)
+ "\n"
)
line_2 = "M END" + "\n"
line_3 = "$$$$"
out.write(line_1)
out.write(line_2)
out.write(line_3)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def get_dihedrals(qm_scan_file):
"""
Returns dihedrals from the torsiondrive scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
dihedrals : list
List of all the dihedral values from the qm scan file.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
return dihedrals
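# Both get_dihedrals and get_qm_energies (below) assume torsiondrive scan
# frames whose comment lines contain the word "Dihedral" followed by the
# dihedral value and the energy, e.g. (illustrative formatting only):
# "Dihedral (-165.0,) Energy -346.52047241"
# which would contribute -165.0 to the dihedral list and -346.52047241 to the
# energy list.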
def get_qm_energies(qm_scan_file):
"""
Returns QM optimized energies from the torsiondrive
scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
qm_energies : list
        List of all the QM optimized energies extracted from the torsiondrive
scan file.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
qm_energies = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
energy = float(energy_dihedral[1])
qm_energies.append(energy)
return qm_energies
def generate_mm_pdbs(qm_scan_file, template_pdb):
"""
Generate PDBs from the torsiondrive scan file
based on a template PDB.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
lines_markers = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
lines_markers.append(i)
lines_markers.append(len(lines) + 1)
for i in range(len(lines_markers) - 1):
# pdb_file_to_write = str(dihedrals[i]) + ".pdb"
if dihedrals[i] > 0:
pdb_file_to_write = "plus_" + str(abs(dihedrals[i])) + ".pdb"
if dihedrals[i] < 0:
pdb_file_to_write = "minus_" + str(abs(dihedrals[i])) + ".pdb"
to_begin = lines_markers[i]
to_end = lines_markers[i + 1]
lines_to_write = lines[to_begin + 1 : to_end - 1]
x_coords = []
y_coords = []
z_coords = []
        for line_to_write in lines_to_write:
            coordinates = re.findall(r"[-+]?\d+[.]?\d*", line_to_write)
            x = float(coordinates[0])
            y = float(coordinates[1])
            z = float(coordinates[2])
            x_coords.append(x)
            y_coords.append(y)
            z_coords.append(z)
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = x_coords
ppdb.df["ATOM"]["y_coord"] = y_coords
ppdb.df["ATOM"]["z_coord"] = z_coords
ppdb.to_pdb(pdb_file_to_write)
def remove_mm_files(qm_scan_file):
"""
    Deletes the PDB, inpcrd and prmtop files generated for the
    torsiondrive scan points.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
for i in mm_pdb_list:
command = "rm -rf " + i
os.system(command)
command = "rm -rf " + i[:-4] + ".inpcrd"
os.system(command)
command = "rm -rf " + i[:-4] + ".prmtop"
os.system(command)
def get_non_torsion_mm_energy(system_pdb, load_topology, system_xml):
"""
    Returns the sum of all the non-torsional energies (i.e. the
    HarmonicBondForce, HarmonicAngleForce and NonbondedForce
    contributions) of the system from the PDB file, given the topology
    and the force field file.
Parameters
----------
system_pdb : str
System PDB file to load the openmm system topology
and coordinates.
load_topology : {"openmm", "parmed"}
Argument to specify how to load the topology.
system_xml : str
XML force field file for the openmm system.
Returns
-------
Sum of all the non-torsional energies of the system.
"""
system_prmtop = system_pdb[:-4] + ".prmtop"
system_inpcrd = system_pdb[:-4] + ".inpcrd"
if load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(system_pdb, structure=True).topology,
parmed.load_file(system_xml),
)
if load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(system_pdb).topology,
parmed.load_file(system_xml),
)
openmm_system.save(system_prmtop, overwrite=True)
openmm_system.coordinates = parmed.load_file(
system_pdb, structure=True
).coordinates
openmm_system.save(system_inpcrd, overwrite=True)
parm = parmed.load_file(system_prmtop, system_inpcrd)
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
# print(prmtop_energy_decomposition)
    # Flatten the energy decomposition once and look up the
    # non-torsional force contributions.
    energy_decomposition_dict = list_to_dict(
        [
            item
            for sublist in [
                list(elem) for elem in prmtop_energy_decomposition
            ]
            for item in sublist
        ]
    )
    prmtop_energy_decomposition_value_no_torsion = [
        energy_decomposition_dict.get("HarmonicBondForce"),
        energy_decomposition_dict.get("HarmonicAngleForce"),
        energy_decomposition_dict.get("NonbondedForce"),
    ]
return sum(prmtop_energy_decomposition_value_no_torsion)
def get_mm_potential_energies(qm_scan_file, load_topology, system_xml):
"""
Returns potential energy of the system from the PDB file
given the topology and the forcefield file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML file to load the openmm system.
Returns
-------
mm_potential_energies : list
        List of all the non-torsional MM energies for the
generated PDB files.
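    Examples
    --------
    A minimal sketch; the file names are placeholders, and the per-scan-point
    PDB files are assumed to have been created beforehand with
    generate_mm_pdbs.
    >>> energies = get_mm_potential_energies(  # doctest: +SKIP
    ...     qm_scan_file="scan.xyz",
    ...     load_topology="openmm",
    ...     system_xml="guest_init_II.xml",
    ... )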
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
    mm_potential_energies = []
    for i in mm_pdb_list:
mm_energy = get_non_torsion_mm_energy(
system_pdb=i, load_topology=load_topology, system_xml=system_xml,
)
mm_potential_energies.append(mm_energy)
return mm_potential_energies
def list_diff(list_1, list_2):
"""
Returns the difference between two lists as a list.
Parameters
----------
list_1 : list
First list
list_2 : list
Second list.
Returns
-------
diff_list : list
        List containing the differences between the elements of
the two lists.
Examples
--------
>>> list_1 = [4, 2, 8, 3, 0, 6, 7]
>>> list_2 = [5, 3, 1, 5, 6, 0, 4]
>>> list_diff(list_1, list_2)
[-1, -1, 7, -2, -6, 6, 3]
"""
diff_list = []
zipped_list = zip(list_1, list_2)
for list1_i, list2_i in zipped_list:
diff_list.append(list1_i - list2_i)
return diff_list
def dihedral_energy(x, k1, k2, k3, k4=0):
"""
    Fourier-series expression for the dihedral energy,
    E(x) = k1*(1 + cos(x)) + k2*(1 - cos(2x)) + k3*(1 + cos(3x))
    + k4*(1 - cos(4x)), where the dihedral angle x is given in degrees
    (the factor 0.01745 converts degrees to radians).
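    Examples
    --------
    A small numerical sketch with arbitrary force constants (kcal/mol)
    and a dihedral angle given in degrees.
    >>> energy = dihedral_energy(60.0, k1=1.2, k2=0.3, k3=0.8)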
"""
energy_1 = k1 * (1 + np.cos(1 * x * 0.01745))
energy_2 = k2 * (1 - np.cos(2 * x * 0.01745))
energy_3 = k3 * (1 + np.cos(3 * x * 0.01745))
energy_4 = k4 * (1 - np.cos(4 * x * 0.01745))
dihedral_energy = energy_1 + energy_2 + energy_3 + energy_4
return dihedral_energy
def error_function(delta_qm, delta_mm):
"""
    Root mean squared error (RMSE) between the QM and MM energy profiles.
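    Examples
    --------
    A small numerical sketch with made-up QM and MM energy profiles.
    >>> round(error_function([0.0, 1.0, 2.0], [0.0, 2.0, 2.0]), 4)
    0.5774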
"""
squared_error = np.square(np.subtract(delta_qm, delta_mm))
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
def error_function_boltzmann(delta_qm, delta_mm, T):
"""
    Boltzmann-weighted root mean squared error, in which each squared
    QM-MM deviation is weighted by exp(-delta_qm / (kb * T)).
"""
kb = 3.297623483 * 10 ** (-24) # in cal/K
delta_qm_boltzmann_weighted = [np.exp(-i / (kb * T)) for i in delta_qm]
squared_error = (
np.square(np.subtract(delta_qm, delta_mm))
* delta_qm_boltzmann_weighted
)
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
def gen_init_guess(qm_scan_file, load_topology, system_xml):
"""
    Generates an initial guess for the torsional parameters by
    curve-fitting the dihedral energy expression to the scaled MM
    potential energies of the scan geometries.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML force field file for the system.
Returns
-------
k_init_guess : list
Initial guess for the torsional parameters.
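    Examples
    --------
    A minimal sketch; the file names are placeholders for files produced
    earlier in the torsion-fitting workflow.
    >>> k_init = gen_init_guess(  # doctest: +SKIP
    ...     qm_scan_file="scan.xyz",
    ...     load_topology="openmm",
    ...     system_xml="guest_init_II.xml",
    ... )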
"""
x = get_dihedrals(qm_scan_file)
y = scale_list(
list_=get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
)
init_vals = [0.0, 0.0, 0.0, 0.0]
k_init_guess, covar = scipy.optimize.curve_fit(
dihedral_energy, x, y, p0=init_vals
)
for i in range(len(k_init_guess)):
if k_init_guess[i] < 0:
k_init_guess[i] = 0
return k_init_guess
def objective_function(k_array, x, delta_qm):
"""
Objective function for the torsional parameter fitting.
"""
delta_mm = dihedral_energy(
x, k1=k_array[0], k2=k_array[1], k3=k_array[2], k4=k_array[3]
)
loss_function = error_function(delta_qm, delta_mm)
return loss_function
def fit_params(qm_scan_file, load_topology, system_xml, method):
"""
    Minimizes the objective function to obtain the fitted torsional
    parameters k1-k4.
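    Examples
    --------
    A minimal sketch; the file names are placeholders, and "L-BFGS-B" is
    simply one example of a scipy.optimize.minimize method that supports
    bounds.
    >>> k_fit = fit_params(  # doctest: +SKIP
    ...     qm_scan_file="scan.xyz",
    ...     load_topology="openmm",
    ...     system_xml="guest_init_II.xml",
    ...     method="L-BFGS-B",
    ... )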
"""
k_guess = gen_init_guess(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
x_data = np.array(get_dihedrals(qm_scan_file))
delta_qm = np.array(
scale_list(list_hartree_kcal(list_=get_qm_energies(qm_scan_file)))
)
optimise = scipy.optimize.minimize(
objective_function,
k_guess,
args=(x_data, delta_qm),
method=method,
bounds=[(0.00, None), (0.00, None), (0.00, None), (0.00, None),],
)
return optimise.x
def get_tor_params(
qm_scan_file, template_pdb, load_topology, system_xml, method
):
"""
Returns the fitted torsional parameters.
"""
qm_e = get_qm_energies(qm_scan_file=qm_scan_file)
qm_e_kcal = list_hartree_kcal(qm_e)
delta_qm = scale_list(qm_e_kcal)
generate_mm_pdbs(qm_scan_file=qm_scan_file, template_pdb=template_pdb)
mm_pe_no_torsion_kcal = get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
delta_mm = scale_list(mm_pe_no_torsion_kcal)
opt_param = fit_params(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
return opt_param
def get_torsional_lines(
template_pdb,
system_xml,
qm_scan_file,
load_topology,
method,
dihedral_text_file,
):
"""
    Returns the <Torsion> lines to be added to the XML force field file.
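    Examples
    --------
    A minimal sketch; all file names are placeholders, and the dihedral
    text file is assumed to end with a line listing the four atom numbers
    of the scanned dihedral.
    >>> lines = get_torsional_lines(  # doctest: +SKIP
    ...     template_pdb="guest_init_II.pdb",
    ...     system_xml="guest_init_II.xml",
    ...     qm_scan_file="scan.xyz",
    ...     load_topology="openmm",
    ...     method="L-BFGS-B",
    ...     dihedral_text_file="dihedrals.txt",
    ... )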
"""
opt_param = get_tor_params(
qm_scan_file=qm_scan_file,
template_pdb=template_pdb,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
dihedral_text = open(dihedral_text_file, "r")
dihedral_text_lines = dihedral_text.readlines()
atom_numbers = dihedral_text_lines[-1]
atom_index_from_1 = [
int(re.findall(r"\d+", atom_numbers)[0]),
int(re.findall(r"\d+", atom_numbers)[1]),
int(re.findall(r"\d+", atom_numbers)[2]),
int(re.findall(r"\d+", atom_numbers)[3]),
]
atom_index = [i - 1 for i in atom_index_from_1]
atom_index_lines = (
" "
+ "p1="
+ '"'
+ str(atom_index[0])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(atom_index[1])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(atom_index[2])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(atom_index[3])
+ '"'
+ " "
)
tor_lines = []
for i in range(len(opt_param)):
line_to_append = (
" "
+ "<Torsion "
+ "k="
+ '"'
+ str(round(opt_param[i], 8))
+ '"'
+ atom_index_lines
+ "periodicity="
+ '"'
+ str(i + 1)
+ '"'
+ " "
+ "phase="
+ '"'
+ "0"
+ '"'
+ "/>"
)
# print(line_to_append)
tor_lines.append(line_to_append)
return tor_lines
def singular_resid(pdbfile, qmmmrebind_init_file):
"""
    Saves a PDB file in which all chain IDs are set to "A".
Parameters
----------
pdbfile: str
Input PDB file
qmmmrebind_init_file: str
Output PDB file
"""
ppdb = PandasPdb().read_pdb(pdbfile)
ppdb.df["HETATM"]["chain_id"] = "A"
ppdb.df["ATOM"]["chain_id"] = "A"
ppdb.to_pdb(
path=qmmmrebind_init_file, records=None, gz=False, append_newline=True
)
def relax_init_structure(
pdbfile,
prmtopfile,
qmmmrebindpdb,
sim_output="output.pdb",
sim_steps=100000,
):
"""
    Minimizes the energy of the initial PDB structure with the given
    topology file and runs a short MD simulation.
    Parameters
    ----------
    pdbfile: str
        Input PDB file.
    prmtopfile : str
        Input prmtop file.
    qmmmrebindpdb: str
Output PDB file.
sim_output: str
Simulation output trajectory file.
sim_steps: int
MD simulation steps.
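    Examples
    --------
    A minimal sketch; the file names are placeholders, OpenMM must be
    available, and a short run is shown purely for illustration.
    >>> relax_init_structure(  # doctest: +SKIP
    ...     pdbfile="system_qmmmrebind.pdb",
    ...     prmtopfile="system_qmmmrebind.prmtop",
    ...     qmmmrebindpdb="system_qmmmrebind_relaxed.pdb",
    ...     sim_steps=10000,
    ... )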
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedMethod=simtk.openmm.app.PME,
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=10000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(qmmmrebindpdb, sim_steps)
)
simulation.step(sim_steps)
command = "rm -rf " + sim_output
os.system(command)
def truncate(x):
"""
    Returns the value formatted as a fixed-width string; the number of
    decimal places used depends on the number of digits before the
    decimal point.
    Parameters
    ----------
    x: float
        Input value.
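    Examples
    --------
    The return value is a fixed-width string; the examples below simply
    illustrate the formatting behaviour.
    >>> truncate(5.123456789)
    '5.12345679'
    >>> truncate(123.4)
    '123.400000'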
"""
if len(str(int(float(x)))) == 1:
x = format(x, ".8f")
if len(str(int(float(x)))) == 2:
x = format(x, ".7f")
if len(str(int(float(x)))) == 3:
x = format(x, ".6f")
if len(str(int(float(x)))) == 4:
x = format(x, ".5f")
if len(str(x)) > 10:
x = round(x, 10)
return x
def add_vectors_inpcrd(pdbfile, inpcrdfile):
"""
Adds periodic box dimensions to the inpcrd file
Parameters
----------
pdbfile: str
PDB file containing the periodic box information.
inpcrdfile: str
Input coordinate file.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
vector_list = [float(i) for i in vector_list]
vector_list = vector_list[1 : 1 + 6]
line_to_add = (
" "
+ truncate(vector_list[0])
+ " "
+ truncate(vector_list[1])
+ " "
+ truncate(vector_list[2])
+ " "
+ truncate(vector_list[3])
+ " "
+ truncate(vector_list[4])
+ " "
+ truncate(vector_list[5])
)
print(line_to_add)
with open(inpcrdfile, "a+") as f:
f.write(line_to_add)
def add_dim_prmtop(pdbfile, prmtopfile):
"""
    Adds the BOX_DIMENSIONS flag to the prmtop file.
Parameters
----------
prmtopfile: str
Input prmtop file.
pdbfile: str
PDB file containing the periodic box information.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
vector_list = [float(i) for i in vector_list]
vector_list = vector_list[1 : 1 + 6]
vector_list = [i / 10 for i in vector_list]
vector_list = [truncate(i) for i in vector_list]
vector_list = [i + "E+01" for i in vector_list]
line3 = (
" "
+ vector_list[3]
+ " "
+ vector_list[0]
+ " "
+ vector_list[1]
+ " "
+ vector_list[2]
)
print(line3)
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
with open(prmtopfile) as f1, open("intermediate.prmtop", "w") as f2:
for line in f1:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f2.write(line)
command = "rm -rf " + prmtopfile
os.system(command)
command = "mv intermediate.prmtop " + prmtopfile
os.system(command)
def add_period_prmtop(parm_file, ifbox):
"""
Changes the value of IFBOX if needed for the prmtop / parm file.
Set to 1 if standard periodic box and 2 when truncated octahedral.
"""
with open(parm_file) as f:
parm_lines = f.readlines()
lines_contain = []
for i in range(len(parm_lines)):
if parm_lines[i].startswith("%FLAG POINTERS"):
lines_contain.append(i + 4)
line = parm_lines[lines_contain[0]]
line_new = "%8s %6s %6s %6s %6s %6s %6s %6s %6s %6s" % (
re.findall(r"\d+", line)[0],
re.findall(r"\d+", line)[1],
re.findall(r"\d+", line)[2],
re.findall(r"\d+", line)[3],
re.findall(r"\d+", line)[4],
re.findall(r"\d+", line)[5],
re.findall(r"\d+", line)[6],
str(ifbox),
re.findall(r"\d+", line)[8],
re.findall(r"\d+", line)[9],
)
parm_lines[lines_contain[0]] = line_new + "\n"
with open(parm_file, "w") as f:
for i in parm_lines:
f.write(i)
def add_solvent_pointers_prmtop(non_reparams_file, reparams_file):
"""
Adds the flag solvent pointers to the topology file.
"""
f_non_params = open(non_reparams_file, "r")
lines_non_params = f_non_params.readlines()
for i in range(len(lines_non_params)):
if "FLAG SOLVENT_POINTERS" in lines_non_params[i]:
to_begin = int(i)
solvent_pointers = lines_non_params[to_begin : to_begin + 3]
file = open(reparams_file, "a")
for i in solvent_pointers:
file.write(i)
def prmtop_calibration(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
):
"""
    Standardizes the topology file (sets mbondi3 radii and recomputes
    the molecule definitions).
Parameters
----------
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
"""
parm = parmed.load_file(prmtopfile, inpcrdfile)
parm_1 = parmed.tools.actions.changeRadii(parm, "mbondi3")
parm_1.execute()
parm_2 = parmed.tools.actions.setMolecules(parm)
parm_2.execute()
parm.save(prmtopfile, overwrite=True)
def run_openmm_prmtop_inpcrd(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with inpcrd and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
inpcrd = simtk.openmm.app.AmberInpcrdFile(inpcrdfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
if inpcrd.boxVectors is None:
add_vectors_inpcrd(
pdbfile=pdbfile, inpcrdfile=inpcrdfile,
)
if inpcrd.boxVectors is not None:
simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
print(inpcrd.boxVectors)
simulation.context.setPositions(inpcrd.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def run_openmm_prmtop_pdb(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with pdb and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def move_qmmmmrebind_files(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
pdbfile="system_qmmmrebind.pdb",
):
"""
    Copies the QMMMReBind-generated topology and parameter files
    to a new directory (reparameterized_files).
Parameters
----------
prmtopfile: str
QMMMReBind generated prmtop file.
inpcrdfile: str
QMMMReBind generated inpcrd file.
pdbfile: str
QMMMReBind generated PDB file.
"""
current_pwd = os.getcwd()
command = "rm -rf reparameterized_files"
os.system(command)
command = "mkdir reparameterized_files"
os.system(command)
shutil.copy(
current_pwd + "/" + prmtopfile,
current_pwd + "/" + "reparameterized_files" + "/" + prmtopfile,
)
shutil.copy(
current_pwd + "/" + inpcrdfile,
current_pwd + "/" + "reparameterized_files" + "/" + inpcrdfile,
)
shutil.copy(
current_pwd + "/" + pdbfile,
current_pwd + "/" + "reparameterized_files" + "/" + pdbfile,
)
def move_qm_files():
"""
    Copies the QM engine generated files to a new directory (qm_data).
"""
current_pwd = os.getcwd()
command = "rm -rf qm_data"
os.system(command)
command = "mkdir qm_data"
os.system(command)
command = "cp -r " + "*.com* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.log* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.chk* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.fchk* " + current_pwd + "/" + "qm_data"
os.system(command)
def move_qmmmrebind_files():
"""
    Moves all QMMMReBind files to a new directory (qmmmrebind_data).
"""
current_pwd = os.getcwd()
command = "rm -rf qmmmrebind_data"
os.system(command)
command = "mkdir qmmmrebind_data"
os.system(command)
command = "mv " + "*.sdf* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.txt* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.pdb* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xml* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.chk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.fchk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.com* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.log* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.inpcrd* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.prmtop* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.parm7* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.out* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*run_command* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.dat* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xyz* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
class PrepareQMMM:
"""
A class used to segregate the QM and MM regions.
    This class contains methods to remove the solvent, ions and all
    entities other than the receptor and the ligand. It also
    defines the Quantum Mechanical (QM) region and the Molecular
    Mechanical (MM) region based upon the distance of the ligand
    from the receptor and the chosen number of receptor residues. It
    is assumed that the initial PDB file contains the receptor
    followed by the ligand.
...
Attributes
----------
init_pdb : str
Initial PDB file containing the receptor-ligand complex with
solvent, ions, etc.
cleaned_pdb : str
Formatted PDB file containing only the receptor and the ligand.
guest_init_pdb : str
A separate ligand PDB file with atom numbers not beginning from 1.
host_pdb : str
A separate receptor PDB file with atom numbers beginning from 1.
guest_resname : str
Three letter residue ID for the ligand.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
guest_xyz : str, optional
A text file of the XYZ coordinates of the ligand.
distance : float, optional
The distance required to define the QM region of the receptor.
This is the distance between the atoms of the ligand and the
atoms of the receptor.
residue_list : str, optional
A text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
host_qm_atoms : str, optional
        A text file of the atom numbers of the receptor in the QM
        region.
    host_mm_atoms : str, optional
        A text file of the atom numbers of the receptor in the MM
        region (all atoms except atoms in the QM region).
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
host_mm_pdb : str, optional
PDB file for the receptor's MM region.
qm_pdb : str, optional
PDB file for the QM region (receptor's QM region and the
ligand).
mm_pdb : str, optional
PDB file for the MM region.
host_mm_region_I_atoms : str, optional
        A text file of the atom numbers of the receptor in the MM
        region preceding the QM region.
host_mm_region_II_atoms : str, optional
        A text file of the atom numbers of the receptor in the MM
        region following the QM region.
host_mm_region_I_pdb : str, optional
        PDB file of the receptor in the MM region preceding the
QM region.
host_mm_region_II_pdb : str, optional
PDB file of the receptor in the MM region following the
QM region.
num_residues : int, optional
Number of residues required in the QM region of the receptor.
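    Examples
    --------
    A minimal sketch of how the class might be driven; the input PDB,
    residue name, distance and number of residues are placeholders chosen
    purely for illustration.
    >>> qmmm = PrepareQMMM(  # doctest: +SKIP
    ...     init_pdb="complex.pdb",
    ...     distance=3.0,
    ...     num_residues=5,
    ...     guest_resname="BEN",
    ... )
    >>> qmmm.clean_up()  # doctest: +SKIP
    >>> qmmm.create_host_guest()  # doctest: +SKIP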
"""
def __init__(
self,
init_pdb,
distance,
num_residues,
guest_resname,
cleaned_pdb="system.pdb",
guest_init_pdb="guest_init.pdb",
host_pdb="host.pdb",
guest_pdb="guest_init_II.pdb",
guest_xyz="guest_coord.txt",
residue_list="residue_list.txt",
host_qm_atoms="host_qm.txt",
host_mm_atoms="host_mm.txt",
host_qm_pdb="host_qm.pdb",
host_mm_pdb="host_mm.pdb",
qm_pdb="qm.pdb",
mm_pdb="mm.pdb",
host_mm_region_I_atoms="host_mm_region_I.txt",
host_mm_region_II_atoms="host_mm_region_II.txt",
host_mm_region_I_pdb="host_mm_region_I.pdb",
host_mm_region_II_pdb="host_mm_region_II.pdb",
):
self.init_pdb = init_pdb
self.distance = distance
self.num_residues = num_residues
self.guest_resname = guest_resname
self.cleaned_pdb = cleaned_pdb
self.guest_init_pdb = guest_init_pdb
self.host_pdb = host_pdb
self.guest_pdb = guest_pdb
self.guest_xyz = guest_xyz
self.residue_list = residue_list
self.host_qm_atoms = host_qm_atoms
self.host_mm_atoms = host_mm_atoms
self.host_qm_pdb = host_qm_pdb
self.host_mm_pdb = host_mm_pdb
self.qm_pdb = qm_pdb
self.mm_pdb = mm_pdb
self.host_mm_region_I_atoms = host_mm_region_I_atoms
self.host_mm_region_II_atoms = host_mm_region_II_atoms
self.host_mm_region_I_pdb = host_mm_region_I_pdb
self.host_mm_region_II_pdb = host_mm_region_II_pdb
def clean_up(self):
"""
Reads the given PDB file, removes all entities except the
receptor and ligand and saves a new pdb file.
"""
ions = [
"Na+",
"Cs+",
"K+",
"Li+",
"Rb+",
"Cl-",
"Br-",
"F-",
"I-",
"Ca2",
]
intermediate_file_1 = self.cleaned_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.cleaned_pdb[:-4] + "_intermediate_2.pdb"
command = (
"pdb4amber -i "
+ self.init_pdb
+ " -o "
+ intermediate_file_1
+ " --noter --dry"
)
os.system(command)
to_delete = (
intermediate_file_1[:-4] + "_nonprot.pdb",
intermediate_file_1[:-4] + "_renum.txt",
intermediate_file_1[:-4] + "_sslink",
intermediate_file_1[:-4] + "_water.pdb",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_1) as f1, open(
intermediate_file_2, "w") as f2:
for line in f1:
if not any(ion in line for ion in ions):
f2.write(line)
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.cleaned_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def create_host_guest(self):
"""
Saves separate receptor and ligand PDB files.
"""
with open(self.cleaned_pdb) as f1, open(self.host_pdb, "w") as f2:
for line in f1:
                if self.guest_resname not in line and "CRYST1" not in line:
f2.write(line)
with open(self.cleaned_pdb) as f1, open(
self.guest_init_pdb, "w"
) as f2:
for line in f1:
if self.guest_resname in line or "END" in line:
f2.write(line)
def realign_guest(self):
"""
Saves a ligand PDB file with atom numbers beginning from 1.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_init_pdb)
to_subtract = min(ppdb.df["ATOM"]["atom_number"]) - 1
ppdb.df["ATOM"]["atom_number"] = (
ppdb.df["ATOM"]["atom_number"] - to_subtract
)
intermediate_file_1 = self.guest_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.guest_pdb[:-4] + "_intermediate_2.pdb"
ppdb.to_pdb(path=intermediate_file_1)
command = (
"pdb4amber -i "
+ intermediate_file_1
+ " -o "
+ intermediate_file_2
)
os.system(command)
to_delete = (
intermediate_file_2[:-4] + "_nonprot.pdb",
intermediate_file_2[:-4] + "_renum.txt",
intermediate_file_2[:-4] + "_sslink",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.guest_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def get_guest_coord(self):
"""
Saves a text file of the XYZ coordinates of the ligand.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
xyz = ppdb.df["ATOM"][["x_coord", "y_coord", "z_coord"]]
xyz_to_list = xyz.values.tolist()
np.savetxt(self.guest_xyz, xyz_to_list)
def get_qm_resids(self):
"""
Saves a text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
"""
guest_coord_list = np.loadtxt(self.guest_xyz)
host_atom_list = []
for i in range(len(guest_coord_list)):
reference_point = guest_coord_list[i]
# TODO: move reads outside of loop
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
distances = ppdb.distance(xyz=reference_point, records=("ATOM"))
all_within_distance = ppdb.df["ATOM"][
distances < float(self.distance)
]
host_df = all_within_distance["atom_number"]
host_list = host_df.values.tolist()
host_atom_list.append(host_list)
host_atom_list = list(itertools.chain(*host_atom_list))
host_atom_list = set(host_atom_list)
host_atom_list = list(host_atom_list)
host_atom_list.sort()
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
index_list = []
for i in host_atom_list:
indices = np.where(df["atom_number"] == i)
indices = list(indices)[0]
indices = list(indices)
index_list.append(indices)
index_list = list(itertools.chain.from_iterable(index_list))
df1 = df.iloc[
index_list,
]
# TODO: make it write list of integers
resid_num = list(df1.residue_number.unique())
np.savetxt(self.residue_list, resid_num, fmt="%i")
def get_host_qm_mm_atoms(self):
"""
        Saves a text file of the atom numbers of the receptor in the QM
        region and another for the MM region.
"""
resid_num = np.loadtxt(self.residue_list)
# approximated_res_list = [int(i) for i in resid_num]
approximated_res_list = []
# TODO: what is this doing?
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
host_index_nested_list = []
for i in approximated_res_list:
indices = np.where(df["residue_number"] == i)
#TODO: the program seems to error when this line is removed, which
# makes no sense.
indices = list(indices)[0]
indices = list(indices)
host_index_nested_list.append(indices)
host_index_list = list(
itertools.chain.from_iterable(host_index_nested_list)
)
df_atom = df.iloc[host_index_list]
df_atom_number = df_atom["atom_number"]
host_atom_list = df_atom_number.values.tolist()
selected_atoms = []
selected_atoms.extend(host_atom_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
non_selected_atoms = list(set(len_atoms).difference(selected_atoms))
assert len(non_selected_atoms) + len(selected_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_qm_atoms, selected_atoms, fmt="%i")
np.savetxt(self.host_mm_atoms, non_selected_atoms, fmt="%i")
def save_host_pdbs(self):
"""
Saves a PDB file for the receptor's QM region and MM
region separately.
"""
selected_atoms = np.loadtxt(self.host_qm_atoms)
# TODO: not necessary if savetxt writes in integers
selected_atoms = [int(i) for i in selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_pdb, records=None, gz=False, append_newline=True,
)
non_selected_atoms = np.loadtxt(self.host_mm_atoms)
non_selected_atoms = [int(i) for i in non_selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in non_selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_qm_pdb, records=None, gz=False, append_newline=True,
)
def get_host_mm_region_atoms(self):
"""
Saves a text file for the atoms of the receptor's MM region
preceding the QM region and saves another text file for the
        atoms of the receptor's MM region following the QM region.
"""
resid_num = np.loadtxt(self.residue_list)
approximated_res_list = []
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
# print(approximated_res_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["residue_number"]]
res_list = list(set(df["residue_number"].to_list()))
res_mm_list = list(set(res_list).difference(approximated_res_list))
# print(res_mm_list)
res_mm_region_I_list = []
# TODO: This can probably be made into a single loop by comparing i
# to the maximum value within approximated_res_list
for i in res_mm_list:
for j in approximated_res_list:
if i < j:
res_mm_region_I_list.append(i)
res_mm_region_I_list = list(set(res_mm_region_I_list))
res_mm_region_II_list = list(
set(res_mm_list).difference(res_mm_region_I_list)
)
# print(res_mm_region_II_list)
ppdb.read_pdb(self.host_mm_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
mm_region_I_index_nested_list = []
for i in res_mm_region_I_list:
indices = np.where(df["residue_number"] == i)
# TODO: again, this is strange code
indices = list(indices)[0]
indices = list(indices)
mm_region_I_index_nested_list.append(indices)
mm_region_I_index_list = list(
itertools.chain.from_iterable(mm_region_I_index_nested_list)
)
df_atom = df.iloc[mm_region_I_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_I_atom_list = df_atom_number.values.tolist()
mm_region_I_atoms = []
mm_region_I_atoms.extend(mm_region_I_atom_list)
mm_region_II_index_nested_list = []
for i in res_mm_region_II_list:
indices = np.where(df["residue_number"] == i)
# TODO: again, this is strange code
indices = list(indices)[0]
indices = list(indices)
mm_region_II_index_nested_list.append(indices)
mm_region_II_index_list = list(
itertools.chain.from_iterable(mm_region_II_index_nested_list)
)
df_atom = df.iloc[mm_region_II_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_II_atom_list = df_atom_number.values.tolist()
mm_region_II_atoms = []
mm_region_II_atoms.extend(mm_region_II_atom_list)
ppdb.read_pdb(self.host_mm_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
assert len(mm_region_I_atoms) + len(mm_region_II_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_mm_region_I_atoms, mm_region_I_atoms, fmt="%i")
np.savetxt(self.host_mm_region_II_atoms, mm_region_II_atoms, fmt="%i")
def save_host_mm_regions_pdbs(self):
"""
Saves a PDB file for the receptor's MM region preceding
the QM region and saves another PDB file for the receptor's
        MM region following the QM region.
"""
mm_region_I_atoms = np.loadtxt(self.host_mm_region_I_atoms)
mm_region_I_atoms = [int(i) for i in mm_region_I_atoms]
mm_region_II_atoms = np.loadtxt(self.host_mm_region_II_atoms)
mm_region_II_atoms = [int(i) for i in mm_region_II_atoms]
# NOTE: this is a slightly confusing way to define the atoms to
# write to a PDB - the members that are *not* in a section, rather
# than the members that are.
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_II_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_I_pdb,
records=None,
gz=False,
append_newline=True,
)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_I_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_II_pdb,
records=None,
gz=False,
append_newline=True,
)
def get_qm_mm_regions(self):
"""
Saves separate PDB files for the QM and MM regions.
        The QM region comprises the QM region of the receptor
        and the entire ligand, while the MM region comprises the
        remaining (non-QM) region of the receptor.
"""
with open(self.host_qm_pdb) as f1, open(self.qm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
with open(self.guest_pdb) as f1, open(self.qm_pdb, "a") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
with open(self.host_mm_pdb) as f1, open(self.mm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
class PrepareGaussianGuest:
"""
A class used to prepare the QM engine input file (Gaussian)
for the ligand and run QM calculations with appropriate
keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine. It then runs a QM calculation with the given
    basis set and functional. The checkpoint file is then converted to
    a formatted checkpoint file. The output files (.log, .chk, and .fchk)
    are then used to extract the ligand's force field parameters.
...
Attributes
----------
charge : int, optional
Charge of the ligand.
multiplicity: int, optional
Spin Multiplicity (2S+1) of the ligand where S represents
the total spin of the ligand.
guest_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_out_file: str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_out_file: str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
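    Examples
    --------
    A minimal sketch of the usual call order; the default file names are
    used and a local Gaussian (g16) installation is assumed for the run
    step.
    >>> prep = PrepareGaussianGuest(charge=0, multiplicity=1)  # doctest: +SKIP
    >>> prep.write_input()  # doctest: +SKIP
    >>> prep.run_gaussian()  # doctest: +SKIP
    >>> prep.get_fchk()  # doctest: +SKIP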
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="OPT",
frequency="FREQ",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6)",
gauss_out_file="guest.out",
fchk_out_file="guest_fchk.out",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.gauss_out_file = gauss_out_file
self.fchk_out_file = fchk_out_file
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
def write_input(self):
"""
Writes a Gaussian input file for the ligand.
"""
command_line_1 = "%Chk = " + self.guest_pdb[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = self.guest_pdb[:-4] + " " + "gaussian input file"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_2 = df[["x_coord", "y_coord", "z_coord"]]
df_merged = pd.concat([df_1, df_2], axis=1)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.guest_pdb[:-4] + ".com", "w") as f:
f.write(commands)
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the ligand locally.
"""
execute_command = (
"g16"
+ " < "
+ self.guest_pdb[:-4]
+ ".com"
+ " > "
+ self.guest_pdb[:-4]
+ ".log"
)
with open(self.gauss_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.guest_pdb[:-4]
+ ".chk"
+ " "
+ self.guest_pdb[:-4]
+ ".fchk"
)
with open(self.fchk_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
class PrepareGaussianHostGuest:
"""
A class used to prepare the QM engine input file (Gaussian) for
the receptor - ligand complex and run the QM calculations with
the appropriate keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine for the receptor - ligand complex. It then runs
    a QM calculation with the given basis set and functional. The
    checkpoint file is then converted to a formatted checkpoint file.
    The output files (.log, .chk, and .fchk) are then used to extract
    charges for the ligand and the receptor.
...
Attributes
----------
charge : int, optional
Total charge of the receptor - ligand complex.
multiplicity : int, optional
        Spin Multiplicity (2S+1) of the receptor - ligand complex
        where S represents its total spin.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_system_out_file : str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_system_out_file : str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
host_guest_input : str, optional
Gaussian input file (.com extension) for the receptor - ligand
QM region.
qm_guest_charge_parameter_file : str, optional
        File containing the charges of the ligand atoms and their
        corresponding atom names. The charges obtained are the polarised
        charges due to the surrounding receptor region.
qm_host_charge_parameter_file : str, optional
File containing the charges of the QM region of the receptor.
qm_guest_atom_charge_parameter_file : str, optional
        File containing the charges of the ligand atoms. The charges
        obtained are the polarised charges due to the surrounding
        receptor region.
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
host_qm_pdb="host_qm.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="",
frequency="",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6) SCRF=PCM",
gauss_system_out_file="system_qm.out",
fchk_system_out_file="system_qm_fchk.out",
host_guest_input="host_guest.com",
qm_guest_charge_parameter_file="guest_qm_surround_charges.txt",
qm_host_charge_parameter_file="host_qm_surround_charges.txt",
qm_guest_atom_charge_parameter_file="guest_qm_atom_surround_charges.txt",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.host_qm_pdb = host_qm_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
self.gauss_system_out_file = gauss_system_out_file
self.fchk_system_out_file = fchk_system_out_file
self.host_guest_input = host_guest_input
self.qm_guest_charge_parameter_file = qm_guest_charge_parameter_file
self.qm_host_charge_parameter_file = qm_host_charge_parameter_file
self.qm_guest_atom_charge_parameter_file = (
qm_guest_atom_charge_parameter_file
)
def write_input(self):
"""
Writes a Gaussian input file for the receptor - ligand QM region.
"""
command_line_1 = "%Chk = " + self.host_guest_input[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = "Gaussian Input File"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_3 = df[["x_coord", "y_coord", "z_coord"]]
df_2 = pd.Series(["0"] * len(df), name="decide_freeze")
df_merged_1 = pd.concat([df_1, df_2, df_3], axis=1)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_qm_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_3 = df[["x_coord", "y_coord", "z_coord"]]
df_2 = pd.Series(["0"] * len(df), name="decide_freeze")
df_merged_2 = pd.concat([df_1, df_2, df_3], axis=1)
df_merged = pd.concat([df_merged_1, df_merged_2], axis=0)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.host_guest_input, "w") as f:
f.write(commands)
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the ligand - receptor region
locally.
"""
execute_command = (
"g16"
+ " < "
+ self.host_guest_input
+ " > "
+ self.host_guest_input[:-4]
+ ".log"
)
with open(self.gauss_system_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.host_guest_input[:-4]
+ ".chk"
+ " "
+ self.host_guest_input[:-4]
+ ".fchk"
)
with open(self.fchk_system_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_qm_host_guest_charges(self):
"""
Extract charge information for the receptor - ligand QM region.
"""
log_file = self.host_guest_input[:-4] + ".log"
with open(log_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Fitting point charges to electrostatic potential" in lines[i]:
to_begin = int(i)
if " Sum of ESP charges =" in lines[i]:
to_end = int(i)
# Why + 4?
charges = lines[to_begin + 4 : to_end]
charge_list = []
for i in range(len(charges)):
charge_list.append(charges[i].strip().split())
charge_list_value = []
atom_list = []
for i in range(len(charge_list)):
charge_list_value.append(charge_list[i][2])
atom_list.append(charge_list[i][1])
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df_guest = ppdb.df["ATOM"]
number_guest_atoms = df_guest.shape[0]
data_tuples = list(zip(atom_list, charge_list_value))
df_charge = pd.DataFrame(data_tuples, columns=["Atom", "Charge"])
number_host_atoms = df_charge.shape[0] - number_guest_atoms
df_charge_guest = df_charge.head(number_guest_atoms)
df_charge_host = df_charge.tail(number_host_atoms)
df_charge_only_guest = df_charge_guest["Charge"]
df_charge_guest.to_csv(
self.qm_guest_charge_parameter_file,
index=False,
header=False,
sep=" ",
)
df_charge_host.to_csv(
self.qm_host_charge_parameter_file,
index=False,
header=False,
sep=" ",
)
df_charge_only_guest.to_csv(
self.qm_guest_atom_charge_parameter_file,
index=False,
header=False,
sep=" ",
)
class ParameterizeGuest:
"""
A class used to obtain force field parameters for the ligand (bond,
angle and charge parameters) from QM calculations.
    This class contains methods to process the Gaussian QM output
    files (.chk, .fchk and .log files). Methods in the class extract
    the unprocessed hessian matrix from the Gaussian QM calculations,
    process it and use the Modified Seminario Method to obtain the
    bond and angle parameters. The class also extracts the QM charges
    from the log file.
...
Attributes
----------
xyz_file: str, optional
XYZ file for ligand coordinates obtained from its corresponding
formatted checkpoint file.
coordinate_file: str, optional
Text file containing the ligand coordinates (extracted
from the formatted checkpoint file).
unprocessed_hessian_file: str, optional
Unprocessed hessian matrix of the ligand obtained from the
formatted checkpoint file.
bond_list_file: str, optional
Text file containing the bond information of the ligand extracted
from the log file.
angle_list_file: str, optional
Text file containing the angle information of the ligand extracted
from the log file.
hessian_file: str, optional
Processed hessian matrix of the ligand.
atom_names_file: str, optional
Text file containing the list of atom names from the fchk file.
bond_parameter_file: str, optional
Text file containing the bond parameters for the ligand obtained
using the Modified Seminario method.
angle_parameter_file: str, optional
Text file containing the angle parameters of the ligand obtained
        using the Modified Seminario method.
charge_parameter_file: str, optional
Text file containing the QM charges of the ligand.
guest_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
proper_dihedral_file: str, optional
A text file containing proper dihedral angles of the ligand.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
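    Examples
    --------
    A minimal sketch of the usual call order; the default file names are
    assumed to exist from an earlier PrepareGaussianGuest run.
    >>> params = ParameterizeGuest()  # doctest: +SKIP
    >>> params.get_xyz()  # doctest: +SKIP
    >>> params.get_unprocessed_hessian()  # doctest: +SKIP
    >>> params.get_bond_angles()  # doctest: +SKIP
    >>> params.get_hessian()  # doctest: +SKIP
    >>> params.get_atom_names()  # doctest: +SKIP
    >>> params.get_bond_angle_params()  # doctest: +SKIP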
"""
def __init__(
self,
xyz_file="guest_coords.xyz",
coordinate_file="guest_coordinates.txt",
unprocessed_hessian_file="guest_unprocessed_hessian.txt",
bond_list_file="guest_bond_list.txt",
angle_list_file="guest_angle_list.txt",
hessian_file="guest_hessian.txt",
atom_names_file="guest_atom_names.txt",
bond_parameter_file="guest_bonds.txt",
angle_parameter_file="guest_angles.txt",
charge_parameter_file="guest_qm_surround_charges.txt",
guest_pdb="guest_init_II.pdb",
proper_dihedral_file="proper_dihedrals.txt",
functional="B3LYP",
basis_set="6-31G",
):
self.xyz_file = xyz_file
self.coordinate_file = coordinate_file
self.unprocessed_hessian_file = unprocessed_hessian_file
self.bond_list_file = bond_list_file
self.angle_list_file = angle_list_file
self.hessian_file = hessian_file
self.atom_names_file = atom_names_file
self.bond_parameter_file = bond_parameter_file
self.angle_parameter_file = angle_parameter_file
self.charge_parameter_file = charge_parameter_file
self.guest_pdb = guest_pdb
self.proper_dihedral_file = proper_dihedral_file
self.functional = functional
self.basis_set = basis_set
def get_xyz(self):
"""
Saves XYZ file from the formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
to_begin = int(i)
cartesian_coords = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_coordinates / 5))
]
cartesian_list = []
for i in range(len(cartesian_coords)):
cartesian_list.append(cartesian_coords[i].strip().split())
coordinates_list = [
item for sublist in cartesian_list for item in sublist
]
# Converted from Atomic units (Bohrs) to Angstroms
list_coords = [float(x) * BOHRS_PER_ANGSTROM for x in coordinates_list]
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_number_strings = lines[to_begin + 1 : to_end]
atom_numbers_nested = []
for i in range(len(atomic_number_strings)):
atom_numbers_nested.append(atomic_number_strings[i].strip().split())
numbers = [item for sublist in atom_numbers_nested for item in sublist]
N = int(no_coordinates / 3)
        coords = np.zeros((N, 3))
        n = 0
        names = []
        # Gives name for atomic number
        for x in range(0, len(numbers)):
            names.append(element_list[int(numbers[x]) - 1][1])
        # Write the coordinates to the new xyz file while it is still open
        with open(self.xyz_file, "w") as file:
            file.write(str(N) + "\n \n")
            for i in range(0, N):
                for j in range(0, 3):
                    coords[i][j] = list_coords[n]
                    n = n + 1
                file.write(
                    names[i]
                    + " "
                    + str(round(coords[i][0], 3))
                    + " "
                    + str(round(coords[i][1], 3))
                    + " "
                    + str(round(coords[i][2], 3))
                    + "\n"
                )
        np.savetxt(self.coordinate_file, coords, fmt="%s")
def get_unprocessed_hessian(self):
"""
Saves a text file of the unprocessed hessian matrix from the
formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Cartesian Force Constants" in lines[i]:
no_hessian = re.findall(r"\d+|\d+.\d+", lines[i])
no_hessian = int(no_hessian[0])
to_begin = int(i)
hessian = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_hessian / 5))
]
hessian_list = []
for i in range(len(hessian)):
hessian_list.append(hessian[i].strip().split())
unprocessed_Hessian = [
item for sublist in hessian_list for item in sublist
]
np.savetxt(
self.unprocessed_hessian_file, unprocessed_Hessian, fmt="%s",
)
def get_bond_angles(self):
"""
Saves a text file containing bonds and angles from the gaussian
log file.
"""
log_file = self.guest_pdb[:-4] + ".log"
        # Keep the file handle open for the whole parse, since the loop
        # below keeps calling fid.readline()
        fid = open(log_file, "r")
        tline = fid.readline()
bond_list = []
angle_list = []
tmp = "R" # States if bond or angle
# Finds the bond and angles from the .log file
while tline:
tline = fid.readline()
# Line starts at point when bond and angle list occurs
if (
len(tline) > 80
and tline[0:81].strip()
== "! Name Definition Value Derivative Info. !"
):
tline = fid.readline()
tline = fid.readline()
# Stops when all bond and angles recorded
while (tmp[0] == "R") or (tmp[0] == "A"):
line = tline.split()
tmp = line[1]
# Bond or angles listed as string
list_terms = line[2][2:-1]
# Bond List
if tmp[0] == "R":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
bond_list.append(x)
# Angle List
if tmp[0] == "A":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
angle_list.append(x)
tline = fid.readline()
# Leave loop
tline = -1
        fid.close()
        np.savetxt(self.bond_list_file, bond_list, fmt="%s")
np.savetxt(self.angle_list_file, angle_list, fmt="%s")
def get_hessian(self):
"""
Extracts hessian matrix from the unprocessed hessian matrix
and saves into a new file.
"""
unprocessed_Hessian = np.loadtxt(self.unprocessed_hessian_file)
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
length_hessian = 3 * N
hessian = np.zeros((length_hessian, length_hessian))
m = 0
# Write the hessian in a 2D array format
for i in range(0, length_hessian):
for j in range(0, (i + 1)):
hessian[i][j] = unprocessed_Hessian[m]
hessian[j][i] = unprocessed_Hessian[m]
m = m + 1
hessian = (hessian * HARTREE_PER_KCAL_MOL) / (
BOHRS_PER_ANGSTROM ** 2
) # Change from Hartree/bohr to kcal/mol/ang
np.savetxt(self.hessian_file, hessian, fmt="%s")
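    # The fchk "Cartesian Force Constants" block stores the lower triangle of
    # the (3N x 3N) Hessian five values per line, packed row by row, e.g. for
    # 3N = 3 the order is (0,0), (1,0), (1,1), (2,0), (2,1), (2,2).  The loop
    # above mirrors each element across the diagonal to rebuild the full
    # symmetric matrix before converting Hartree/Bohr^2 to kcal/mol/Angstrom^2.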
def get_atom_names(self):
"""
Saves a list of atom names from the formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_numbers = lines[to_begin + 1 : to_end]
atom_numbers = []
for i in range(len(atomic_numbers)):
atom_numbers.append(atomic_numbers[i].strip().split())
numbers = [item for sublist in atom_numbers for item in sublist]
names = []
# Gives name for atomic number
for x in range(0, len(numbers)):
names.append(element_list[int(numbers[x]) - 1][1])
atom_names = []
for i in range(0, len(names)):
atom_names.append(names[i].strip() + str(i + 1))
np.savetxt(self.atom_names_file, atom_names, fmt="%s")
def get_bond_angle_params(self):
"""
Saves the bond and angle parameter files obtained from
the formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
coords = np.loadtxt(self.coordinate_file)
hessian = np.loadtxt(self.hessian_file)
bond_list = np.loadtxt(self.bond_list_file, dtype=int)
atom_names = np.loadtxt(self.atom_names_file, dtype=str)
# Find bond lengths
bond_lengths = np.zeros((N, N))
for i in range(0, N):
for j in range(0, N):
diff_i_j = np.array(coords[i, :]) - np.array(coords[j, :])
bond_lengths[i][j] = np.linalg.norm(diff_i_j)
eigenvectors = np.empty((3, 3, N, N), dtype=complex)
eigenvalues = np.empty((N, N, 3), dtype=complex)
partial_hessian = np.zeros((3, 3))
for i in range(0, N):
for j in range(0, N):
partial_hessian = hessian[
(i * 3) : ((i + 1) * 3), (j * 3) : ((j + 1) * 3)
]
[a, b] = np.linalg.eig(partial_hessian)
eigenvalues[i, j, :] = a
eigenvectors[:, :, i, j] = b
# Modified Seminario method to find the bond parameters and
# print them to file
file_bond = open(self.bond_parameter_file, "w")
k_b = np.zeros(len(bond_list))
bond_length_list = np.zeros(len(bond_list))
unique_values_bonds = [] # Used to find average values
for i in range(0, len(bond_list)):
AB = force_constant_bond(
bond_list[i][0],
bond_list[i][1],
eigenvalues,
eigenvectors,
coords,
)
BA = force_constant_bond(
bond_list[i][1],
bond_list[i][0],
eigenvalues,
eigenvectors,
coords,
)
# Order of bonds sometimes causes slight differences,
# find the mean
k_b[i] = np.real((AB + BA) / 2)
            # Vibrational_scaling takes into account DFT deficiencies /
            # anharmonicity
vibrational_scaling = get_vibrational_scaling(
functional=self.functional, basis_set=self.basis_set
)
vibrational_scaling_squared = vibrational_scaling ** 2
k_b[i] = k_b[i] * vibrational_scaling_squared
bond_length_list[i] = bond_lengths[bond_list[i][0]][
bond_list[i][1]
]
file_bond.write(
atom_names[bond_list[i][0]]
+ "-"
+ atom_names[bond_list[i][1]]
+ " "
)
file_bond.write(
str("%#.5g" % k_b[i])
+ " "
+ str("%#.4g" % bond_length_list[i])
+ " "
+ str(bond_list[i][0] + 1)
+ " "
+ str(bond_list[i][1] + 1)
)
file_bond.write("\n")
unique_values_bonds.append(
[
atom_names[bond_list[i][0]],
atom_names[bond_list[i][1]],
k_b[i],
bond_length_list[i],
1,
]
)
file_bond.close()
angle_list = np.loadtxt(self.angle_list_file, dtype=int)
# Modified Seminario method to find the angle parameters
# and print them to file
file_angle = open(self.angle_parameter_file, "w")
k_theta = np.zeros(len(angle_list))
theta_0 = np.zeros(len(angle_list))
unique_values_angles = [] # Used to find average values
# Modified Seminario part goes here ...
# Connectivity information for Modified Seminario Method
central_atoms_angles = []
# A structure is created with the index giving the central
# atom of the angle,
# an array then lists the angles with that central atom.
# i.e. central_atoms_angles{3} contains an array of angles
# with central atom 3
for i in range(0, len(coords)):
central_atoms_angles.append([])
for j in range(0, len(angle_list)):
if i == angle_list[j][1]:
# For angle ABC, atoms A C are written to array
AC_array = [angle_list[j][0], angle_list[j][2], j]
central_atoms_angles[i].append(AC_array)
# For angle ABC, atoms C A are written to array
CA_array = [angle_list[j][2], angle_list[j][0], j]
central_atoms_angles[i].append(CA_array)
# Sort rows by atom number
for i in range(0, len(coords)):
central_atoms_angles[i] = sorted(
central_atoms_angles[i], key=itemgetter(0)
)
# Find normals u_PA for each angle
unit_PA_all_angles = []
for i in range(0, len(central_atoms_angles)):
unit_PA_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
# For the angle at central_atoms_angles[i][j,:] the
# corresponding u_PA value
# is found for the plane ABC and bond AB, where ABC
# corresponds to the order
                # of the arguments. This is why the reverse order
# was also added
unit_PA_all_angles[i].append(
u_PA_from_angles(
central_atoms_angles[i][j][0],
i,
central_atoms_angles[i][j][1],
coords,
)
)
        # Finds the contributing factors from the other angle terms
        # scaling_factor_all_angles = cell(max(max(angle_list)));
        # This array will contain scaling factor and angle list position
scaling_factor_all_angles = []
for i in range(0, len(central_atoms_angles)):
scaling_factor_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
n = 1
m = 1
angles_around = 0
additional_contributions = 0
scaling_factor_all_angles[i].append([0, 0])
# Position in angle list
scaling_factor_all_angles[i][j][1] = central_atoms_angles[i][
j
][2]
# Goes through the list of angles with the same central atom
# and computes the
                # term needed for the modified Seminario method
# Forwards directions, finds the same bonds with the central atom i
while (
((j + n) < len(central_atoms_angles[i]))
and central_atoms_angles[i][j][0]
== central_atoms_angles[i][j + n][0]
):
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j + n][:],
)
)
)
** 2
)
n = n + 1
angles_around = angles_around + 1
# Backwards direction, finds the same bonds with the central atom i
while ((j - m) >= 0) and central_atoms_angles[i][j][
0
] == central_atoms_angles[i][j - m][0]:
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j - m][:],
)
)
)
** 2
)
m = m + 1
angles_around = angles_around + 1
if n != 1 or m != 1:
                    # Finds the mean value of the additional contributions
                    # (to revert to the original Seminario method, comment
                    # out the additional term below)
scaling_factor_all_angles[i][j][0] = 1 + (
additional_contributions / (m + n - 2)
)
else:
scaling_factor_all_angles[i][j][0] = 1
scaling_factors_angles_list = []
for i in range(0, len(angle_list)):
scaling_factors_angles_list.append([])
# Orders the scaling factors according to the angle list
for i in range(0, len(central_atoms_angles)):
for j in range(0, len(central_atoms_angles[i])):
scaling_factors_angles_list[
scaling_factor_all_angles[i][j][1]
].append(scaling_factor_all_angles[i][j][0])
# Finds the angle force constants with the scaling factors
# included for each angle
for i in range(0, len(angle_list)):
# Ensures that there is no difference when the
# ordering is changed
[AB_k_theta, AB_theta_0] = force_angle_constant(
angle_list[i][0],
angle_list[i][1],
angle_list[i][2],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][0],
scaling_factors_angles_list[i][1],
)
[BA_k_theta, BA_theta_0] = force_angle_constant(
angle_list[i][2],
angle_list[i][1],
angle_list[i][0],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][1],
scaling_factors_angles_list[i][0],
)
k_theta[i] = (AB_k_theta + BA_k_theta) / 2
theta_0[i] = (AB_theta_0 + BA_theta_0) / 2
            # Vibrational_scaling takes into account DFT
            # deficiencies / anharmonicity
k_theta[i] = k_theta[i] * vibrational_scaling_squared
file_angle.write(
atom_names[angle_list[i][0]]
+ "-"
+ atom_names[angle_list[i][1]]
+ "-"
+ atom_names[angle_list[i][2]]
+ " "
)
file_angle.write(
str("%#.4g" % k_theta[i])
+ " "
+ str("%#.4g" % theta_0[i])
+ " "
+ str(angle_list[i][0] + 1)
+ " "
+ str(angle_list[i][1] + 1)
+ " "
+ str(angle_list[i][2] + 1)
)
file_angle.write("\n")
unique_values_angles.append(
[
atom_names[angle_list[i][0]],
atom_names[angle_list[i][1]],
atom_names[angle_list[i][2]],
k_theta[i],
theta_0[i],
1,
]
)
file_angle.close()
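    # For reference: force_constant_bond() (defined elsewhere in this module)
    # is expected to implement the Seminario-style estimate
    #     k_AB ~ sum_i lambda_i * |u_AB . v_i|,
    # where lambda_i and v_i are the eigenvalues/eigenvectors of the 3x3
    # interatomic sub-Hessian computed above, while force_angle_constant()
    # additionally applies the scaling factors assembled in
    # scaling_factors_angles_list, which is what makes this the *Modified*
    # Seminario method.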
def get_charges(self):
"""
Saves the atomic charges in a text file obtained from
the Gaussian log file.
"""
log_file = self.guest_pdb[:-4] + ".log"
with open(log_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Fitting point charges to electrostatic potential" in lines[i]:
to_begin = int(i)
if " Sum of ESP charges =" in lines[i]:
to_end = int(i)
charges = lines[to_begin + 4 : to_end]
charge_list = []
for i in range(len(charges)):
charge_list.append(charges[i].strip().split())
charge_list_value = []
atom_list = []
for i in range(len(charge_list)):
charge_list_value.append(charge_list[i][2])
atom_list.append(charge_list[i][1])
data_tuples = list(zip(atom_list, charge_list_value))
df_charge = pd.DataFrame(data_tuples, columns=["Atom", "Charge"])
df_charge.to_csv(
self.charge_parameter_file, index=False, header=False, sep=" ",
)
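    # The slice lines[to_begin + 4 : to_end] skips the ESP-fit header lines and
    # keeps rows of the form (values illustrative):
    #     1  C   -0.123456
    #     2  H    0.045678
    # so charge_list[i][1] is the element symbol and charge_list[i][2] is the
    # fitted charge written out via the pandas DataFrame.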
def get_proper_dihedrals(self):
"""
Saves proper dihedral angles of the ligand in a text file.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
no_atoms = len(ppdb.df["ATOM"])
atom_index_list = []
for i in range(no_atoms):
atom_index_list.append(i + 1)
possible_dihedrals = []
for dihed in itertools.permutations(atom_index_list, 4):
possible_dihedrals.append(dihed)
df_bonds = pd.read_csv(
self.bond_parameter_file, header=None, delimiter=r"\s+"
)
df_bonds.columns = [
"bond",
"k_bond",
"bond_length",
"bond_1",
"bond_2",
]
bond1 = df_bonds["bond_1"].values.tolist()
bond2 = df_bonds["bond_2"].values.tolist()
bond_list_list = []
for i in range(len(bond1)):
args = (bond1[i], bond2[i])
bond_list_list.append(list(args))
reverse_bond_list_list = []
for bonds in bond_list_list:
reverse_bond_list_list.append(reverse_list(bonds))
bond_lists = bond_list_list + reverse_bond_list_list
proper_dihed_repeated = []
for i in range(len(possible_dihedrals)):
dihed_frag = (
[possible_dihedrals[i][0], possible_dihedrals[i][1]],
[possible_dihedrals[i][1], possible_dihedrals[i][2]],
[possible_dihedrals[i][2], possible_dihedrals[i][3]],
)
a = [
dihed_frag[0] in bond_lists,
dihed_frag[1] in bond_lists,
dihed_frag[2] in bond_lists,
]
if a == [True, True, True]:
proper_dihed_repeated.append(possible_dihedrals[i])
len_repeated_dihed_list = len(proper_dihed_repeated)
proper_dihedrals = proper_dihed_repeated
for x in proper_dihedrals:
z = x[::-1]
if z in proper_dihedrals:
proper_dihedrals.remove(z)
len_non_repeated_dihed_list = len(proper_dihedrals)
# print(len_repeated_dihed_list == len_non_repeated_dihed_list * 2)
np.savetxt(self.proper_dihedral_file, proper_dihedrals, fmt="%s")
# return(proper_dihedrals)
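    # Note on get_proper_dihedrals(): itertools.permutations(atom_index_list, 4)
    # enumerates every ordered 4-tuple of atoms (O(N^4) in the atom count), and
    # a tuple is kept only when all three consecutive pairs occur in the bond
    # list (in either direction).  Reversed duplicates (i, j, k, l) vs.
    # (l, k, j, i) are then pruned, so len_repeated_dihed_list should equal
    # 2 * len_non_repeated_dihed_list for a well-formed bond list.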
class PrepareGaussianHost:
"""
A class used to prepare the QM engine input file (Gaussian)
for the receptor and run QM calculations with appropriate keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine. It then runs a QM calculation with the given
    basis set and functional. The checkpoint file is then converted to
    a formatted checkpoint file. Output files (.log, .chk, and .fchk)
    will then be used to extract the receptor's force field parameters.
...
Attributes
----------
charge : int, optional
Charge of the receptor.
multiplicity: int, optional
Spin Multiplicity (2S+1) of the receptor where S represents
the total spin of the receptor.
host_qm_pdb: str, optional
PDB file of the receptor's QM region with atom numbers
beginning from 1.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the receptor
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_out_file: str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_out_file: str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
"""
def __init__(
self,
charge=0,
multiplicity=1,
host_qm_pdb="host_qm.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="OPT",
frequency="FREQ",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE) SCF=(maxcycles=4000) SYMMETRY=NONE",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6)",
gauss_out_file="host_qm.out",
fchk_out_file="host_qm_fchk.out",
):
self.charge = charge
self.multiplicity = multiplicity
self.host_qm_pdb = host_qm_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.gauss_out_file = gauss_out_file
self.fchk_out_file = fchk_out_file
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
def write_input(self):
"""
Writes a Gaussian input file for the receptor QM region.
"""
# TODO: create generic function for Gaussian Input file (DRY principle)
command_line_1 = "%Chk = " + self.host_qm_pdb[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = self.host_qm_pdb[:-4] + " " + "gaussian input file"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_qm_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_2 = df[["x_coord", "y_coord", "z_coord"]]
df_merged = pd.concat([df_1, df_2], axis=1)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.host_qm_pdb[:-4] + ".com", "w") as f:
f.write(commands)
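        # With the default attributes, the assembled .com file looks like
        # (illustrative):
        #     %Chk = host_qm.chk
        #     %Mem = 50GB
        #     %NProcShared = 12
        #     # B3LYP 6-31G OPT FREQ INTEGRAL=(GRID=ULTRAFINE) SCF=(maxcycles=4000) SYMMETRY=NONE POP(MK,READRADII) IOP(6/33=2,6/42=6)
        #
        #     host_qm gaussian input file
        #
        #     0 1
        #     <element  x  y  z> rows taken from the PDB, then a blank line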
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the receptor locally.
"""
execute_command = (
"g16"
+ " < "
+ self.host_qm_pdb[:-4]
+ ".com"
+ " > "
+ self.host_qm_pdb[:-4]
+ ".log"
)
with open(self.gauss_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.host_qm_pdb[:-4]
+ ".chk"
+ " "
+ self.host_qm_pdb[:-4]
+ ".fchk"
)
with open(self.fchk_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
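# A minimal usage sketch for PrepareGaussianHost, kept as a comment so that it
# is not executed on import (file names are the class defaults):
#
#     host_qm = PrepareGaussianHost(
#         charge=0, multiplicity=1, host_qm_pdb="host_qm.pdb"
#     )
#     host_qm.write_input()
#     host_qm.run_gaussian()  # requires a local g16 installation
#     host_qm.get_fchk()      # requires formchk from the Gaussian suite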
class ParameterizeHost:
"""
A class used to obtain force field parameters for the QM region
of the receptor (bond, angle and charge parameters) from QM
calculations.
    This class contains methods to process the Gaussian QM output
    files (.chk, .fchk and .log files). Methods in the class extract
    the unprocessed hessian matrix from the Gaussian QM calculations,
    process it and use the Modified Seminario Method to obtain the
    bond and angle parameters. The class also extracts the QM charges
    from the log file.
...
Attributes
----------
xyz_file: str, optional
XYZ file for ligand coordinates obtained from its corresponding
formatted checkpoint file.
coordinate_file: str, optional
Text file containing the receptor coordinates (extracted
from the formatted checkpoint file).
unprocessed_hessian_file: str, optional
Unprocessed hessian matrix of the receptor obtained from the
formatted checkpoint file.
bond_list_file: str, optional
Text file containing the bond information of the receptor
extracted from the log file.
angle_list_file: str, optional
Text file containing the angle information of the receptor
extracted from the log file.
hessian_file: str, optional
Processed hessian matrix of the receptor.
atom_names_file: str, optional
Text file containing the list of atom names from the fchk file.
bond_parameter_file: str, optional
Text file containing the bond parameters for the receptor
obtained using the Modified Seminario method.
angle_parameter_file: str, optional
Text file containing the angle parameters of the receptor.
charge_parameter_file: str, optional
Text file containing the QM charges of the receptor.
host_qm_pdb: str, optional
PDB file for the receptor's QM region.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
"""
def __init__(
self,
xyz_file="host_qm_coords.xyz",
coordinate_file="host_qm_coordinates.txt",
unprocessed_hessian_file="host_qm_unprocessed_hessian.txt",
bond_list_file="host_qm_bond_list.txt",
angle_list_file="host_qm_angle_list.txt",
hessian_file="host_qm_hessian.txt",
atom_names_file="host_qm_atom_names.txt",
bond_parameter_file="host_qm_bonds.txt",
angle_parameter_file="host_qm_angles.txt",
charge_parameter_file="host_qm_surround_charges.txt",
host_qm_pdb="host_qm.pdb",
functional="B3LYP",
basis_set="6-31G",
):
self.xyz_file = xyz_file
self.coordinate_file = coordinate_file
self.unprocessed_hessian_file = unprocessed_hessian_file
self.bond_list_file = bond_list_file
self.angle_list_file = angle_list_file
self.hessian_file = hessian_file
self.atom_names_file = atom_names_file
self.bond_parameter_file = bond_parameter_file
self.angle_parameter_file = angle_parameter_file
self.charge_parameter_file = charge_parameter_file
self.host_qm_pdb = host_qm_pdb
self.functional = functional
self.basis_set = basis_set
def get_xyz(self):
"""
Saves XYZ file from the formatted checkpoint file.
"""
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
to_begin = int(i)
cartesian_coords = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_coordinates / 5))
]
cartesian_list = []
for i in range(len(cartesian_coords)):
cartesian_list.append(cartesian_coords[i].strip().split())
coordinates_list = [
item for sublist in cartesian_list for item in sublist
]
        # Converted from Atomic units (Bohrs) to Angstroms
        list_coords = [float(x) * BOHRS_PER_ANGSTROM for x in coordinates_list]
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_numbers = lines[to_begin + 1 : to_end]
atom_numbers = []
for i in range(len(atomic_numbers)):
atom_numbers.append(atomic_numbers[i].strip().split())
numbers = [item for sublist in atom_numbers for item in sublist]
N = int(no_coordinates / 3)
# Opens the new xyz file
file = open(self.xyz_file, "w")
file.write(str(N) + "\n \n")
coords = np.zeros((N, 3))
n = 0
names = []
# Gives name for atomic number
for x in range(0, len(numbers)):
names.append(element_list[int(numbers[x]) - 1][1])
# Print coordinates to new input_coords.xyz file
for i in range(0, N):
for j in range(0, 3):
coords[i][j] = list_coords[n]
n = n + 1
file.write(
names[i]
+ str(round(coords[i][0], 3))
+ " "
+ str(round(coords[i][1], 3))
+ " "
+ str(round(coords[i][2], 3))
+ "\n"
)
file.close()
np.savetxt(self.coordinate_file, coords, fmt="%s")
def get_unprocessed_hessian(self):
"""
Saves a text file of the unprocessed hessian matrix from the
formatted checkpoint file.
"""
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Cartesian Force Constants" in lines[i]:
no_hessian = re.findall(r"\d+|\d+.\d+", lines[i])
no_hessian = int(no_hessian[0])
for i in range(len(lines)):
if "Cartesian Force Constants" in lines[i]:
to_begin = int(i)
hessian = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_hessian / 5))
]
hessian_list = []
for i in range(len(hessian)):
hessian_list.append(hessian[i].strip().split())
unprocessed_Hessian = [
item for sublist in hessian_list for item in sublist
]
np.savetxt(
self.unprocessed_hessian_file, unprocessed_Hessian, fmt="%s",
)
def get_bond_angles(self):
"""
Saves a text file containing bonds and angles from the gaussian
log file.
"""
log_file = self.host_qm_pdb[:-4] + ".log"
fid = open(log_file, "r")
tline = fid.readline()
bond_list = []
angle_list = []
tmp = "R" # States if bond or angle
# Finds the bond and angles from the .log file
while tline:
tline = fid.readline()
# Line starts at point when bond and angle list occurs
if (
len(tline) > 80
and tline[0:81].strip()
== "! Name Definition Value Derivative Info. !"
):
tline = fid.readline()
tline = fid.readline()
# Stops when all bond and angles recorded
while (tmp[0] == "R") or (tmp[0] == "A"):
line = tline.split()
tmp = line[1]
# Bond or angles listed as string
list_terms = line[2][2:-1]
# Bond List
if tmp[0] == "R":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
bond_list.append(x)
# Angle List
if tmp[0] == "A":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
angle_list.append(x)
tline = fid.readline()
# Leave loop
tline = -1
        fid.close()
        np.savetxt(self.bond_list_file, bond_list, fmt="%s")
np.savetxt(self.angle_list_file, angle_list, fmt="%s")
def get_hessian(self):
"""
Extracts hessian matrix from the unprocessed hessian matrix
and saves into a new file.
"""
unprocessed_Hessian = np.loadtxt(self.unprocessed_hessian_file)
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
length_hessian = 3 * N
hessian = np.zeros((length_hessian, length_hessian))
m = 0
# Write the hessian in a 2D array format
for i in range(0, (length_hessian)):
for j in range(0, (i + 1)):
hessian[i][j] = unprocessed_Hessian[m]
hessian[j][i] = unprocessed_Hessian[m]
m = m + 1
        hessian = (hessian * HARTREE_PER_KCAL_MOL) / (
            BOHRS_PER_ANGSTROM ** 2
        )  # Change from Hartree/bohr to kcal/mol/ang
np.savetxt(self.hessian_file, hessian, fmt="%s")
def get_atom_names(self):
"""
Saves a list of atom names from the formatted checkpoint file.
"""
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_numbers = lines[to_begin + 1 : to_end]
atom_numbers = []
for i in range(len(atomic_numbers)):
atom_numbers.append(atomic_numbers[i].strip().split())
numbers = [item for sublist in atom_numbers for item in sublist]
names = []
# Gives name for atomic number
for x in range(0, len(numbers)):
names.append(element_list[int(numbers[x]) - 1][1])
atom_names = []
for i in range(0, len(names)):
atom_names.append(names[i].strip() + str(i + 1))
np.savetxt(self.atom_names_file, atom_names, fmt="%s")
def get_bond_angle_params(self):
"""
Saves the bond and angle parameter files obtained from
the formatted checkpoint file.
"""
fchk_file = self.host_qm_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
coords = np.loadtxt(self.coordinate_file)
hessian = np.loadtxt(self.hessian_file)
bond_list = np.loadtxt(self.bond_list_file, dtype=int)
atom_names = np.loadtxt(self.atom_names_file, dtype=str)
# Find bond lengths
bond_lengths = np.zeros((N, N))
for i in range(0, N):
for j in range(0, N):
diff_i_j = np.array(coords[i, :]) - np.array(coords[j, :])
bond_lengths[i][j] = np.linalg.norm(diff_i_j)
eigenvectors = np.empty((3, 3, N, N), dtype=complex)
eigenvalues = np.empty((N, N, 3), dtype=complex)
partial_hessian = np.zeros((3, 3))
for i in range(0, N):
for j in range(0, N):
partial_hessian = hessian[
(i * 3) : ((i + 1) * 3), (j * 3) : ((j + 1) * 3)
]
[a, b] = np.linalg.eig(partial_hessian)
eigenvalues[i, j, :] = a
eigenvectors[:, :, i, j] = b
# Modified Seminario method to find the bond parameters
# and print them to file
file_bond = open(self.bond_parameter_file, "w")
k_b = np.zeros(len(bond_list))
bond_length_list = np.zeros(len(bond_list))
unique_values_bonds = [] # Used to find average values
for i in range(0, len(bond_list)):
AB = force_constant_bond(
bond_list[i][0],
bond_list[i][1],
eigenvalues,
eigenvectors,
coords,
)
BA = force_constant_bond(
bond_list[i][1],
bond_list[i][0],
eigenvalues,
eigenvectors,
coords,
)
# Order of bonds sometimes causes slight differences,
# find the mean
k_b[i] = np.real((AB + BA) / 2)
            # Vibrational_scaling takes into account DFT deficiencies
            # / anharmonicity
vibrational_scaling = get_vibrational_scaling(
functional=self.functional, basis_set=self.basis_set
)
vibrational_scaling_squared = vibrational_scaling ** 2
k_b[i] = k_b[i] * vibrational_scaling_squared
bond_length_list[i] = bond_lengths[bond_list[i][0]][
bond_list[i][1]
]
file_bond.write(
atom_names[bond_list[i][0]]
+ "-"
+ atom_names[bond_list[i][1]]
+ " "
)
file_bond.write(
str("%#.5g" % k_b[i])
+ " "
+ str("%#.4g" % bond_length_list[i])
+ " "
+ str(bond_list[i][0] + 1)
+ " "
+ str(bond_list[i][1] + 1)
)
file_bond.write("\n")
unique_values_bonds.append(
[
atom_names[bond_list[i][0]],
atom_names[bond_list[i][1]],
k_b[i],
bond_length_list[i],
1,
]
)
file_bond.close()
angle_list = np.loadtxt(self.angle_list_file, dtype=int)
# Modified Seminario method to find the angle parameters
# and print them to file
file_angle = open(self.angle_parameter_file, "w")
k_theta = np.zeros(len(angle_list))
theta_0 = np.zeros(len(angle_list))
unique_values_angles = [] # Used to find average values
# Modified Seminario part goes here ...
# Connectivity information for Modified Seminario Method
central_atoms_angles = []
# A structure is created with the index giving the central
# atom of the angle, an array then lists the angles with
# that central atom.
# i.e. central_atoms_angles{3} contains an array of angles
# with central atom 3
for i in range(0, len(coords)):
central_atoms_angles.append([])
for j in range(0, len(angle_list)):
if i == angle_list[j][1]:
# For angle ABC, atoms A C are written to array
AC_array = [angle_list[j][0], angle_list[j][2], j]
central_atoms_angles[i].append(AC_array)
# For angle ABC, atoms C A are written to array
CA_array = [angle_list[j][2], angle_list[j][0], j]
central_atoms_angles[i].append(CA_array)
# Sort rows by atom number
for i in range(0, len(coords)):
central_atoms_angles[i] = sorted(
central_atoms_angles[i], key=itemgetter(0)
)
# Find normals u_PA for each angle
unit_PA_all_angles = []
for i in range(0, len(central_atoms_angles)):
unit_PA_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
# For the angle at central_atoms_angles[i][j,:] the corresponding
# u_PA value is found for the plane ABC and bond AB,
                # where ABC corresponds to the order of the arguments.
# This is why the reverse order was also added
unit_PA_all_angles[i].append(
u_PA_from_angles(
central_atoms_angles[i][j][0],
i,
central_atoms_angles[i][j][1],
coords,
)
)
# Finds the contributing factors from the other angle terms
# scaling_factor_all_angles = cell(max(max(angle_list)));
# This array will contain scaling factor and angle list position
scaling_factor_all_angles = []
for i in range(0, len(central_atoms_angles)):
scaling_factor_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
n = 1
m = 1
angles_around = 0
additional_contributions = 0
scaling_factor_all_angles[i].append([0, 0])
# Position in angle list
scaling_factor_all_angles[i][j][1] = central_atoms_angles[i][
j
][2]
# Goes through the list of angles with the same central
                # atom and computes the term needed for the modified Seminario method
# Forwards directions, finds the same bonds with the central atom i
while (
((j + n) < len(central_atoms_angles[i]))
and central_atoms_angles[i][j][0]
== central_atoms_angles[i][j + n][0]
):
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j + n][:],
)
)
)
** 2
)
n = n + 1
angles_around = angles_around + 1
# Backwards direction, finds the same bonds with the central atom i
while ((j - m) >= 0) and central_atoms_angles[i][j][
0
] == central_atoms_angles[i][j - m][0]:
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j - m][:],
)
)
)
** 2
)
m = m + 1
angles_around = angles_around + 1
if n != 1 or m != 1:
                    # Finds the mean value of the additional contributions
                    # (to revert to the original Seminario method, comment
                    # out the additional term below)
scaling_factor_all_angles[i][j][0] = 1 + (
additional_contributions / (m + n - 2)
)
else:
scaling_factor_all_angles[i][j][0] = 1
scaling_factors_angles_list = []
for i in range(0, len(angle_list)):
scaling_factors_angles_list.append([])
# Orders the scaling factors according to the angle list
for i in range(0, len(central_atoms_angles)):
for j in range(0, len(central_atoms_angles[i])):
scaling_factors_angles_list[
scaling_factor_all_angles[i][j][1]
].append(scaling_factor_all_angles[i][j][0])
# Finds the angle force constants with the scaling factors
# included for each angle
for i in range(0, len(angle_list)):
# Ensures that there is no difference when the
# ordering is changed
[AB_k_theta, AB_theta_0] = force_angle_constant(
angle_list[i][0],
angle_list[i][1],
angle_list[i][2],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][0],
scaling_factors_angles_list[i][1],
)
[BA_k_theta, BA_theta_0] = force_angle_constant(
angle_list[i][2],
angle_list[i][1],
angle_list[i][0],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][1],
scaling_factors_angles_list[i][0],
)
k_theta[i] = (AB_k_theta + BA_k_theta) / 2
theta_0[i] = (AB_theta_0 + BA_theta_0) / 2
            # Vibrational_scaling takes into account DFT
            # deficiencies / anharmonicity
k_theta[i] = k_theta[i] * vibrational_scaling_squared
file_angle.write(
atom_names[angle_list[i][0]]
+ "-"
+ atom_names[angle_list[i][1]]
+ "-"
+ atom_names[angle_list[i][2]]
+ " "
)
file_angle.write(
str("%#.4g" % k_theta[i])
+ " "
+ str("%#.4g" % theta_0[i])
+ " "
+ str(angle_list[i][0] + 1)
+ " "
+ str(angle_list[i][1] + 1)
+ " "
+ str(angle_list[i][2] + 1)
)
file_angle.write("\n")
unique_values_angles.append(
[
atom_names[angle_list[i][0]],
atom_names[angle_list[i][1]],
atom_names[angle_list[i][2]],
k_theta[i],
theta_0[i],
1,
]
)
file_angle.close()
def get_charges(self):
"""
Saves the atomic charges in a text file obtained from
the Gaussian log file.
"""
log_file = self.host_qm_pdb[:-4] + ".log"
with open(log_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Fitting point charges to electrostatic potential" in lines[i]:
to_begin = int(i)
if " Sum of ESP charges =" in lines[i]:
to_end = int(i)
charges = lines[to_begin + 4 : to_end]
charge_list = []
for i in range(len(charges)):
charge_list.append(charges[i].strip().split())
charge_list_value = []
atom_list = []
for i in range(len(charge_list)):
charge_list_value.append(charge_list[i][2])
atom_list.append(charge_list[i][1])
data_tuples = list(zip(atom_list, charge_list_value))
df_charge = pd.DataFrame(data_tuples, columns=["Atom", "Charge"])
df_charge.to_csv(
self.charge_parameter_file, index=False, header=False, sep=" ",
)
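# A minimal usage sketch for ParameterizeHost, kept as a comment so that it is
# not executed on import; the call order matters because later steps read the
# text files written by the earlier ones:
#
#     host_params = ParameterizeHost(host_qm_pdb="host_qm.pdb")
#     host_params.get_xyz()
#     host_params.get_unprocessed_hessian()
#     host_params.get_bond_angles()
#     host_params.get_hessian()
#     host_params.get_atom_names()
#     host_params.get_bond_angle_params()
#     host_params.get_charges()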
class GuestAmberXMLAmber:
"""
A class used to generate a template force field XML file for the ligand
    in order to regenerate the reparameterised force field XML file.
    This class contains methods to generate a template XML force field through
openforcefield. XML template generation can be obtained through different
file formats such as PDB, SDF, and SMI. Methods support charged ligands as
well. Re-parameterized XML force field files are then generated from the
template files. Different energy components such as the bond, angle,
torsional and non-bonded energies are computed for the non-reparametrized
and the reparameterized force fields. Difference between the
non-reparameterized and reparameterized force field energies can then be
analyzed.
...
Attributes
----------
charge : int
Charge of the ligand.
num_charge_atoms: int, optional
Number of charged atoms in the molecule.
charge_atom_1: int, optional
Charge on the first charged atom.
index_charge_atom_1: int, optional
Index of the first charged atom.
system_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
system_mol2: str, optional
Ligand Mol2 file obtained from PDB file.
system_in: str, optional
Prepi file as required by antechamber.
system_frcmod: str, optional
FRCMOD file as required by antechamber.
prmtop_system : str, optional
Topology file obtained from the ligand PDB.
inpcrd_system : str, optional
Coordinate file obtained from the ligand PDB using the
command saveamberparm.
system_leap : str, optional
Amber generated leap file for generating and saving topology
and coordinate files.
system_xml: str, optional
Serialized XML force field file of the ligand.
system_smi: str, optional
Ligand SMILES format file.
system_sdf: str, optional
Ligand SDF (structure-data) format file.
system_init_sdf: str, optional
Ligand SDF (structure-data) format file. This file will be
generated only if the ligand is charged.
index_charge_atom_2: int, optional
Index of the second charged atom of the ligand.
charge_atom_2: int, optional
Charge on the second charged atom of the ligand.
charge_parameter_file: str, optional
File containing the charges of ligand atoms and their corresponding
atoms.
system_qm_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
bond_parameter_file: str, optional
Text file containing the bond parameters for the ligand.
angle_parameter_file: str, optional
Text file containing the angle parameters of the ligand.
system_qm_params_file: str, optional
A text file containing the QM obtained parameters for the
ligand.
reparameterised_intermediate_system_xml_file: str, optional
        XML force field file with bond and angle parameter lines replaced by
corresponding values obtained from the QM calculations.
system_xml_non_bonded_file: str, optional
Text file to write the NonBondedForce Charge Parameters from
the non-parameterised system XML file.
system_xml_non_bonded_reparams_file: str, optional
Text file containing the non-bonded parameters parsed from the
XML force field file.
reparameterised_system_xml_file: str, optional
Reparameterized force field XML file obtained using
openforcefield.
non_reparameterised_system_xml_file: str, optional
Non-reparameterized force field XML file obtained using
openforcefield.
prmtop_system_non_params: str, optional
Amber generated topology file saved from the non-reparameterized
force field XML file for the ligand.
inpcrd_system_non_params: str, optional
Amber generated coordinate file saved from the non-reparameterized
force field XML file for the ligand.
prmtop_system_params: str, optional
Amber generated topology file saved from the reparameterized
force field XML file for the ligand.
inpcrd_system_params: str, optional
Amber generated coordinate file saved from the reparameterized
force field XML file for the ligand.
load_topology: str, optional
Argument to specify how to load the topology. Can either be "openmm"
or "parmed".
"""
def __init__(
self,
charge=0,
# TODO: some of these variables are ints, and shouldn't be initialized as strings
num_charge_atoms="",
charge_atom_1="",
index_charge_atom_1="",
system_pdb="guest_init_II.pdb",
system_mol2="guest.mol2",
system_in="guest.in",
system_frcmod="guest.frcmod",
prmtop_system="guest.prmtop",
inpcrd_system="guest.inpcrd",
system_leap="guest.leap",
system_xml="guest_init.xml",
system_smi="guest.smi",
system_sdf="guest.sdf",
system_init_sdf="guest_init.sdf",
index_charge_atom_2=" ",
charge_atom_2=" ",
charge_parameter_file="guest_qm_surround_charges.txt",
system_qm_pdb="guest_init_II.pdb",
bond_parameter_file="guest_bonds.txt",
angle_parameter_file="guest_angles.txt",
system_qm_params_file="guest_qm_params.txt",
reparameterised_intermediate_system_xml_file="guest_intermediate_reparameterised.xml",
system_xml_non_bonded_file="guest_xml_non_bonded.txt",
system_xml_non_bonded_reparams_file="guest_xml_non_bonded_reparams.txt",
reparameterised_system_xml_file="guest_reparameterised.xml",
non_reparameterised_system_xml_file="guest_init.xml",
prmtop_system_non_params="guest_non_params.prmtop",
inpcrd_system_non_params="guest_non_params.inpcrd",
prmtop_system_params="guest_params.prmtop",
inpcrd_system_params="guest_params.inpcrd",
load_topology="openmm",
):
self.charge = charge
self.num_charge_atoms = num_charge_atoms
self.charge_atom_1 = charge_atom_1
self.index_charge_atom_1 = index_charge_atom_1
self.system_pdb = system_pdb
self.system_mol2 = system_mol2
self.system_in = system_in
self.system_frcmod = system_frcmod
self.prmtop_system = prmtop_system
self.inpcrd_system = inpcrd_system
self.system_leap = system_leap
self.system_xml = system_xml
self.system_smi = system_smi
self.system_sdf = system_sdf
self.system_init_sdf = system_init_sdf
self.index_charge_atom_2 = index_charge_atom_2
self.charge_atom_2 = charge_atom_2
self.charge_parameter_file = charge_parameter_file
self.system_qm_pdb = system_qm_pdb
self.bond_parameter_file = bond_parameter_file
self.angle_parameter_file = angle_parameter_file
self.system_qm_params_file = system_qm_params_file
self.reparameterised_intermediate_system_xml_file = (
reparameterised_intermediate_system_xml_file
)
self.system_xml_non_bonded_file = system_xml_non_bonded_file
self.system_xml_non_bonded_reparams_file = (
system_xml_non_bonded_reparams_file
)
self.reparameterised_system_xml_file = reparameterised_system_xml_file
self.non_reparameterised_system_xml_file = (
non_reparameterised_system_xml_file
)
self.prmtop_system_non_params = prmtop_system_non_params
self.inpcrd_system_non_params = inpcrd_system_non_params
self.prmtop_system_params = prmtop_system_params
self.inpcrd_system_params = inpcrd_system_params
self.load_topology = load_topology
def generate_xml_antechamber(self):
"""
Generates an XML forcefield file from the PDB file through antechamber.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -omol2 " + self.system_mol2
"obabel -ipdb "
+ self.system_pdb
+ " -omol2 -O "
+ self.system_mol2
)
os.system(command)
command = (
"antechamber -i "
+ self.system_mol2
+ " -fi mol2 -o "
+ self.system_in
+ " -fo prepi -c bcc -nc "
+ str(self.charge)
)
os.system(command)
command = (
"parmchk2 -i "
+ self.system_in
+ " -o "
+ self.system_frcmod
+ " -f prepi -a Y"
)
os.system(command)
os.system(
"rm -rf ANTECHAMBER* leap.log sqm* ATOMTYPE.INF PREP.INF NEWPDB.PDB"
)
line_1 = "loadamberprep " + self.system_in
line_2 = "loadamberparams " + self.system_frcmod
line_3 = "pdb = loadpdb " + self.system_pdb
line_4 = (
"saveamberparm pdb "
+ self.prmtop_system
+ " "
+ self.inpcrd_system
)
line_5 = "quit"
with open(self.system_leap, "w") as f:
f.write(" " + "\n")
f.write(line_1 + "\n")
f.write(line_2 + "\n")
f.write(line_3 + "\n")
f.write(line_4 + "\n")
f.write(line_5 + "\n")
command = "tleap -f " + self.system_leap
os.system(command)
parm = parmed.load_file(self.prmtop_system, self.inpcrd_system)
system = parm.createSystem()
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
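        # generate_xml_antechamber() chains the standard AmberTools workflow:
        # obabel (PDB -> MOL2), antechamber with AM1-BCC charges (-c bcc),
        # parmchk2 for any missing parameters, tleap to build the
        # prmtop/inpcrd pair, and parmed to serialize the OpenMM System to
        # XML.  Each of these tools needs to be available on the PATH for the
        # os.system() calls above to succeed.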
def generate_xml_from_pdb_smi(self):
"""
Generates an XML forcefield file from the SMILES file through
openforcefield.
"""
# off_molecule = openforcefield.topology.Molecule(self.system_smi)
off_molecule = Molecule(self.system_smi)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(self.system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def generate_xml_from_pdb_sdf(self):
"""
Generates an XML forcefield file from the SDF file through
openforcefield.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -osdf " + self.system_sdf
"obabel -ipdb "
+ self.system_pdb
+ " -osdf -O "
+ self.system_sdf
)
os.system(command)
# off_molecule = openforcefield.topology.Molecule(self.system_sdf)
off_molecule = Molecule(self.system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(self.system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def generate_xml_from_charged_pdb_sdf(self):
"""
Generates an XML forcefield file for a singly charged ligand molecule
from the SDF file through openforcefield.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -osdf " + self.system_init_sdf
"obabel -ipdb "
+ self.system_pdb
+ " -osdf -O "
+ self.system_init_sdf
)
os.system(command)
with open(self.system_init_sdf, "r") as f1:
filedata = f1.readlines()
filedata = filedata[:-2]
with open(self.system_sdf, "w+") as out:
for i in filedata:
out.write(i)
line_1 = (
"M CHG "
+ str(self.num_charge_atoms)
+ " "
+ str(self.index_charge_atom_1)
+ " "
+ str(self.charge_atom_1)
+ "\n"
)
line_2 = "M END" + "\n"
line_3 = "$$$$"
out.write(line_1)
out.write(line_2)
out.write(line_3)
# off_molecule = openforcefield.topology.Molecule(self.system_sdf)
off_molecule = Molecule(self.system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(self.system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
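        # For a ligand with one charged atom, e.g. a +1 charge on (1-based)
        # atom 5, the property block appended to the SDF reads (spacing
        # illustrative):
        #     M CHG 1 5 1
        #     M END
        #     $$$$
        # i.e. the number of charged atoms followed by index/charge pairs.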
def generate_xml_from_doubly_charged_pdb_sdf(self):
"""
        Generates an XML forcefield file for a doubly charged ligand molecule
from the SDF file through openforcefield.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -osdf " + self.system_init_sdf
"obabel -ipdb "
+ self.system_pdb
+ " -osdf -O "
+ self.system_init_sdf
)
os.system(command)
with open(self.system_init_sdf, "r") as f1:
filedata = f1.readlines()
filedata = filedata[:-2]
with open(self.system_sdf, "w+") as out:
for i in filedata:
out.write(i)
line_1 = (
"M CHG "
+ str(self.num_charge_atoms)
+ " "
+ str(self.index_charge_atom_1)
+ " "
+ str(self.charge_atom_1)
+ " "
+ str(self.index_charge_atom_2)
+ " "
+ str(self.charge_atom_2)
+ "\n"
)
line_2 = "M END" + "\n"
line_3 = "$$$$"
out.write(line_1)
out.write(line_2)
out.write(line_3)
# off_molecule = openforcefield.topology.Molecule(self.system_sdf)
off_molecule = Molecule(self.system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(self.system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def write_system_params(self):
"""
Saves the parameters obtained from the QM log files in a text file.
"""
# Charges from QM files
df_charges = pd.read_csv(
self.charge_parameter_file, header=None, delimiter=r"\s+"
)
df_charges.columns = ["atom", "charges"]
qm_charges = df_charges["charges"].values.tolist()
qm_charges = [round(num, 6) for num in qm_charges]
# print(qm_charges)
# Bond Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.system_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
atom_name_list = [i - 1 for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.bond_parameter_file, header=None, delimiter=r"\s+"
)
df.columns = [
"bond",
"k_bond",
"bond_length",
"bond_1",
"bond_2",
]
# print(df.head())
bond_1_list = df["bond_1"].values.tolist()
bond_1_list = [x - 1 + min(atom_name_list) for x in bond_1_list]
bond_2_list = df["bond_2"].values.tolist()
bond_2_list = [x - 1 + min(atom_name_list) for x in bond_2_list]
# print(bond_1_list)
# print(bond_2_list)
k_bond_list = df["k_bond"].values.tolist()
#k_bond_list = [
# i * 418.40 for i in k_bond_list
#] # kcal/mol * A^2 to kJ/mol * nm^2
k_bond_list = [
i * KCAL_MOL_PER_KJ_MOL * ANGSTROMS_PER_NM**2 for i in k_bond_list
] # kcal/mol * A^2 to kJ/mol * nm^2
k_bond_list = [round(num, 10) for num in k_bond_list]
# print(k_bond_list)
bond_length_list = df["bond_length"].values.tolist()
        # Convert bond lengths from Angstroms to nm (10 Angstroms per nm)
        bond_length_list = [i / 10.00 for i in bond_length_list]
bond_length_list = [round(num, 6) for num in bond_length_list]
# print(bond_length_list)
# Angle Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.system_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
atom_name_list = [i - 1 for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.angle_parameter_file, header=None, delimiter=r"\s+"
)
df.columns = [
"angle",
"k_angle",
"angle_degrees",
"angle_1",
"angle_2",
"angle_3",
]
# print(df.head())
angle_1_list = df["angle_1"].values.tolist()
angle_1_list = [x - 1 + min(atom_name_list) for x in angle_1_list]
# print(angle_1_list)
angle_2_list = df["angle_2"].values.tolist()
angle_2_list = [x - 1 + min(atom_name_list) for x in angle_2_list]
# print(angle_2_list)
angle_3_list = df["angle_3"].values.tolist()
angle_3_list = [x - 1 + min(atom_name_list) for x in angle_3_list]
# print(angle_3_list)
k_angle_list = df["k_angle"].values.tolist()
k_angle_list = [
i * KCAL_MOL_PER_KJ_MOL for i in k_angle_list
] # kcal/mol * radian^2 to kJ/mol * radian^2
k_angle_list = [round(num, 6) for num in k_angle_list]
# print(k_angle_list)
angle_list = df["angle_degrees"].values.tolist()
angle_list = [i * RADIANS_PER_DEGREE for i in angle_list]
angle_list = [round(num, 6) for num in angle_list]
# print(angle_list)
xml = open(self.system_qm_params_file, "w")
xml.write("Begin writing the Bond Parameters" + "\n")
# TODO: These should use string formatting to become more concise
for i in range(len(k_bond_list)):
xml.write(
" "
+ "<Bond"
+ " "
+ "d="
+ '"'
+ str(bond_length_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_bond_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(bond_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(bond_2_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Bond Parameters" + "\n")
xml.write("Begin writing the Angle Parameters" + "\n")
for i in range(len(k_angle_list)):
xml.write(
" "
+ "<Angle"
+ " "
+ "a="
+ '"'
+ str(angle_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_angle_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(angle_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(angle_2_list[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(angle_3_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Angle Parameters" + "\n")
xml.write("Begin writing the Charge Parameters" + "\n")
for i in range(len(qm_charges)):
xml.write(
"<Particle"
+ " "
+ "q="
+ '"'
+ str(qm_charges[i])
+ '"'
+ " "
+ "eps="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "sig="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "atom="
+ '"'
+ str(atom_name_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Charge Parameters" + "\n")
xml.close()
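        # write_system_params() produces a plain-text template whose sections
        # are regex-parsed later on; representative lines (values illustrative)
        # are:
        #     <Bond d="0.109" k="284512.0" p1="0" p2="1"/>
        #     <Angle a="1.894" k="313.8" p1="0" p2="1" p3="2"/>
        #     <Particle q="-0.123456" eps="0.0" sig="0.0" atom="0"/>
        # with distances in nm, angles in radians and force constants in the
        # kJ/mol-based units that OpenMM expects.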
def write_intermediate_reparameterised_system_xml(self):
"""
Writes a reparameterised XML force field file for
ligand but without the QM obtained charges.
"""
# Bond Parameters
f_params = open(self.system_qm_params_file, "r")
lines_params = f_params.readlines()
# Bond Parameters
for i in range(len(lines_params)):
if "Begin writing the Bond Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params[i]:
to_end = int(i)
bond_params = lines_params[to_begin + 1 : to_end]
index_search_replace_bond = []
for i in bond_params:
bond_line_to_replace = i
# print(bond_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_bond = [comb_1, comb_2]
# print(comb_list_bond)
list_search_bond = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
]
# print(list_search_bond)
for j in range(len(list_search_bond)):
if list_search_bond[j] != []:
to_add = (list_search_bond[j], i)
# print(to_add)
index_search_replace_bond.append(to_add)
# Angle Parameters
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
index_search_replace_angle = []
for i in angle_params:
angle_line_to_replace = i
# print(angle_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
re.findall("\d*\.?\d+", i)[7],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_3 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_4 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_5 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_6 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_angle = [
comb_1,
comb_2,
comb_3,
comb_4,
comb_5,
comb_6,
]
# print(comb_list_angle)
list_search_angle = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
search_in_file(file=self.system_xml, word=comb_3),
search_in_file(file=self.system_xml, word=comb_4),
search_in_file(file=self.system_xml, word=comb_5),
search_in_file(file=self.system_xml, word=comb_6),
]
# print(list_search_angle)
for j in range(len(list_search_angle)):
if list_search_angle[j] != []:
to_add = (list_search_angle[j], i)
# print(to_add)
index_search_replace_angle.append(to_add)
f_org = open(self.system_xml)
lines = f_org.readlines()
for i in range(len(index_search_replace_bond)):
line_number = index_search_replace_bond[i][0][0][0] - 1
line_to_replace = index_search_replace_bond[i][0][0][1]
line_to_replace_with = index_search_replace_bond[i][1]
lines[line_number] = line_to_replace_with
for i in range(len(index_search_replace_angle)):
line_number = index_search_replace_angle[i][0][0][0] - 1
line_to_replace = index_search_replace_angle[i][0][0][1]
line_to_replace_with = index_search_replace_angle[i][1]
lines[line_number] = line_to_replace_with
f_cop = open(self.reparameterised_intermediate_system_xml_file, "w")
for i in lines:
f_cop.write(i)
f_cop.close()
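        # Replacement strategy used above: for every QM-derived <Bond>/<Angle>
        # line, all orderings of its p1/p2(/p3) attributes are generated
        # (comb_1 .. comb_6) and looked up in the original system XML via
        # search_in_file(); the matching XML line is then overwritten in place
        # with the QM parameter line, so only the bond and angle entries differ
        # in the intermediate reparameterised file.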
def write_reparameterised_system_xml(self):
"""
Writes a reparameterised XML force field file for the ligand.
"""
# Bond Parameters
f_params = open(self.system_qm_params_file, "r")
lines_params = f_params.readlines()
# Bond Parameters
for i in range(len(lines_params)):
if "Begin writing the Bond Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params[i]:
to_end = int(i)
bond_params = lines_params[to_begin + 1 : to_end]
index_search_replace_bond = []
# TODO: These should use string formatting to become more concise
for i in bond_params:
bond_line_to_replace = i
# print(bond_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_bond = [comb_1, comb_2]
# print(comb_list_bond)
list_search_bond = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
]
# print(list_search_bond)
for j in range(len(list_search_bond)):
if list_search_bond[j] != []:
to_add = (list_search_bond[j], i)
# print(to_add)
index_search_replace_bond.append(to_add)
# Angle Parameters
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
index_search_replace_angle = []
for i in angle_params:
angle_line_to_replace = i
# print(angle_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
re.findall("\d*\.?\d+", i)[7],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_3 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_4 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_5 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_6 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_angle = [
comb_1,
comb_2,
comb_3,
comb_4,
comb_5,
comb_6,
]
# print(comb_list_angle)
list_search_angle = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
search_in_file(file=self.system_xml, word=comb_3),
search_in_file(file=self.system_xml, word=comb_4),
search_in_file(file=self.system_xml, word=comb_5),
search_in_file(file=self.system_xml, word=comb_6),
]
# print(list_search_angle)
for j in range(len(list_search_angle)):
if list_search_angle[j] != []:
to_add = (list_search_angle[j], i)
# print(to_add)
index_search_replace_angle.append(to_add)
f_org = open(self.system_xml)
lines = f_org.readlines()
for i in range(len(index_search_replace_bond)):
line_number = index_search_replace_bond[i][0][0][0] - 1
line_to_replace = index_search_replace_bond[i][0][0][1]
line_to_replace_with = index_search_replace_bond[i][1]
lines[line_number] = line_to_replace_with
for i in range(len(index_search_replace_angle)):
line_number = index_search_replace_angle[i][0][0][0] - 1
line_to_replace = index_search_replace_angle[i][0][0][1]
line_to_replace_with = index_search_replace_angle[i][1]
lines[line_number] = line_to_replace_with
f_cop = open(self.reparameterised_intermediate_system_xml_file, "w")
for i in lines:
f_cop.write(i)
f_cop.close()
f_params = open(self.system_qm_params_file)
lines_params = f_params.readlines()
# Charge Parameters
for i in range(len(lines_params)):
if "Begin writing the Charge Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Charge Parameters" in lines_params[i]:
to_end = int(i)
charge_params = lines_params[to_begin + 1 : to_end]
non_bonded_index = []
for k in charge_params:
non_bonded_index.append(int(re.findall("[-+]?\d*\.\d+|\d+", k)[3]))
charge_for_index = []
for k in charge_params:
charge_for_index.append(
float(re.findall("[-+]?\d*\.\d+|\d+", k)[0])
)
xml_off = open(self.system_xml)
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
nonbond_params = xml_off_lines[to_begin + 4 : to_end - 1]
# print(len(nonbond_params))
f_non_bonded = open(self.system_xml_non_bonded_file, "w")
for x in nonbond_params:
f_non_bonded.write(x)
f_non_bonded = open(self.system_xml_non_bonded_file)
lines_non_bonded = f_non_bonded.readlines()
# print(len(lines_non_bonded))
lines_non_bonded_to_write = []
for i in range(len(non_bonded_index)):
line_ = lines_non_bonded[non_bonded_index[i]]
# print(line_)
eps = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[0])
sig = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[2])
line_to_replace = (
" "
+ "<Particle "
+ "eps="
+ '"'
+ str(eps)
+ '"'
+ " "
+ "q="
+ '"'
+ str(charge_for_index[i])
+ '"'
+ " "
+ "sig="
+ '"'
+ str(sig)
+ '"'
+ "/>"
)
lines_non_bonded_to_write.append(line_to_replace)
data_ = list(zip(non_bonded_index, lines_non_bonded_to_write))
df_non_bonded_params = pd.DataFrame(
data_, columns=["line_index", "line"]
)
# print(df_non_bonded_params.head())
f_non_bonded_ = open(self.system_xml_non_bonded_file)
lines_non_bonded_ = f_non_bonded_.readlines()
for i in range(len(lines_non_bonded_)):
if i in non_bonded_index:
lines_non_bonded_[i] = (
df_non_bonded_params.loc[
df_non_bonded_params.line_index == i, "line"
].values[0]
) + "\n"
# print(len(lines_non_bonded_))
f_write_non_bonded_reparams = open(
self.system_xml_non_bonded_reparams_file, "w"
)
for p in range(len(lines_non_bonded_)):
f_write_non_bonded_reparams.write(lines_non_bonded_[p])
f_write_non_bonded_reparams.close()
f_ = open(self.system_xml_non_bonded_reparams_file)
lines_ = f_.readlines()
print(len(lines_) == len(lines_non_bonded))
xml_off = open(self.reparameterised_intermediate_system_xml_file)
        # TODO: implement a helper function to read file lines (DRY principle);
        # see the sketch after this method.
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
lines_before_params = xml_off_lines[: to_begin + 4]
f__ = open(self.system_xml_non_bonded_reparams_file)
lines_params_non_bonded = f__.readlines()
lines_after_params = xml_off_lines[to_end - 1 :]
f_reparams_xml = open(self.reparameterised_system_xml_file, "w")
for x in lines_before_params:
f_reparams_xml.write(x)
for x in lines_params_non_bonded:
f_reparams_xml.write(x)
for x in lines_after_params:
f_reparams_xml.write(x)
f_reparams_xml.close()
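    # A possible refactor addressing the TODOs above (sketch only, not wired
    # into the method): a small helper for reading file lines and f-strings
    # for the attribute strings.  `_read_lines` is a hypothetical name
    # introduced here for illustration.
    #
    #     def _read_lines(path):
    #         with open(path) as f:
    #             return f.readlines()
    #
    #     comb_1 = f'p1="{atom_number_list[0]}" p2="{atom_number_list[1]}"/>'
    #     comb_2 = f'p1="{atom_number_list[1]}" p2="{atom_number_list[0]}"/>'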
def save_amber_params_non_qm_charges(self):
"""
        Saves the Amber-generated topology and coordinate files for the
        ligand without using the QM-derived charges.
"""
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
openmm_system.save(self.prmtop_system_non_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_non_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_non_params, self.inpcrd_system_non_params,
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_non_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_non_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(
self.reparameterised_intermediate_system_xml_file
),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(
self.reparameterised_intermediate_system_xml_file
),
)
openmm_system.save(self.prmtop_system_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_params, self.inpcrd_system_params
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(
self.reparameterised_intermediate_system_xml_file
),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
def save_amber_params(self):
"""
        Saves the Amber-generated topology and coordinate files for the
        ligand.
"""
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
openmm_system.save(self.prmtop_system_non_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_non_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_non_params, self.inpcrd_system_non_params,
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_non_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_non_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(self.reparameterised_system_xml_file),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(self.reparameterised_system_xml_file),
)
openmm_system.save(self.prmtop_system_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_params, self.inpcrd_system_params
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.reparameterised_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
def analyze_diff_energies(self):
"""
Compares the energies of the ligand obtained from the non-parameterized
and the parameterized force field files.
"""
parm_non_params = parmed.load_file(
self.prmtop_system_non_params, self.inpcrd_system_non_params,
)
prmtop_energy_decomposition_non_params = parmed.openmm.energy_decomposition_system(
parm_non_params, parm_non_params.createSystem()
)
prmtop_energy_decomposition_non_params_value = [
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_non_params
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_non_params
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_non_params
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_non_params
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_non_params_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_non_params = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_non_params_list,
prmtop_energy_decomposition_non_params_value,
)
),
columns=["Energy_term", "Energy_parm_non_params"],
)
df_energy_non_params = df_energy_non_params.set_index("Energy_term")
# print(df_energy_non_params)
parm_params = parmed.load_file(
self.prmtop_system_params, self.inpcrd_system_params
)
prmtop_energy_decomposition_params = parmed.openmm.energy_decomposition_system(
parm_params, parm_params.createSystem()
)
prmtop_energy_decomposition_params_value = [
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_params
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_params
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_params
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_params
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_params_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_params = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_params_list,
prmtop_energy_decomposition_params_value,
)
),
columns=["Energy_term", "Energy_parm_params"],
)
df_energy_params = df_energy_params.set_index("Energy_term")
# print(df_energy_params)
df_compare = pd.concat(
[df_energy_non_params, df_energy_params], axis=1
)
df_compare["Energy_difference"] = df_compare[
"Energy_parm_non_params"
].sub(df_compare["Energy_parm_params"], axis=0)
print(df_compare)
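    # A possible refactor (sketch only, not used above): the repeated
    # "flatten the energy decomposition and look up each force term" pattern
    # could be factored into a helper.  `_decompose` is a hypothetical name;
    # it assumes list_to_dict and parmed.openmm.energy_decomposition_system
    # behave exactly as used in the methods above.
    #
    #     def _decompose(structure_like, system_like):
    #         terms = ["HarmonicBondForce", "HarmonicAngleForce",
    #                  "PeriodicTorsionForce", "NonbondedForce"]
    #         decomposition = parmed.openmm.energy_decomposition_system(
    #             structure_like, system_like
    #         )
    #         flat = list_to_dict(
    #             [item for pair in decomposition for item in list(pair)]
    #         )
    #         return [flat.get(term) for term in terms]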
class HostAmberXMLAmber:
"""
    A class used to generate a template force field XML file for the receptor
    in order to regenerate the reparameterised force field XML file.
    This class contains methods to generate a template XML force field through
    openforcefield. Re-parameterized XML force field files are then
    generated from the template files. Different energy components such as
    bond, angle, torsional and non-bonded energies are computed for the
    non-reparameterized and the reparameterized force fields. The difference
    between the non-reparameterized and reparameterized force field energies
    can then be analyzed.
...
Attributes
----------
system_pdb: str, optional
Receptor PDB file with atom numbers beginning from 1.
system_sdf: str, optional
Receptor SDF (structure-data) format file.
    charge : int, optional
        Charge of the receptor.
system_mol2: str, optional
Receptor Mol2 file obtained from PDB file.
system_in: str, optional
Prepi file as required by antechamber.
system_frcmod: str, optional
FRCMOD file as required by antechamber.
prmtop_system : str, optional
Topology file obtained from the receptor PDB.
inpcrd_system : str, optional
Coordinate file obtained from the receptor PDB using the
command saveamberparm.
system_leap : str, optional
Amber generated leap file for generating and saving topology
and coordinate files.
system_xml: str, optional
        Serialized XML force field file of the receptor.
sim_output: str, optional
PDB file containing the trajectory coordinates for the OpenMM
simulation.
    sim_steps: int, optional
Number of steps in the OpenMM MD simulation.
charge_parameter_file: str, optional
        File containing the charges of the receptor atoms and their
        corresponding atom indices.
system_qm_pdb: str, optional
Receptor QM region's PDB file with atom numbers beginning from 1.
bond_parameter_file: str, optional
Text file containing the bond parameters for the receptor.
angle_parameter_file: str, optional
Text file containing the angle parameters of the receptor.
system_qm_params_file: str, optional
A text file containing the QM obtained parameters for the
receptor.
reparameterised_intermediate_system_xml_file: str, optional
XML force field file with bond and angle parameter lines replaced by
corresponding values obtained from the QM calculations.
system_xml_non_bonded_file: str, optional
Text file to write the NonBondedForce Charge Parameters from
the non-parameterised system XML file.
system_xml_non_bonded_reparams_file: str, optional
Text file containing the non-bonded parameters parsed from the
XML force field file.
reparameterised_system_xml_file: str, optional
Reparameterized force field XML file obtained using
openforcefield.
non_reparameterised_system_xml_file: str, optional
Non-reparameterized force field XML file obtained using
openforcefield.
prmtop_system_non_params: str, optional
Amber generated topology file saved from the non-reparameterized
force field XML file for the receptor.
inpcrd_system_non_params: str, optional
Amber generated coordinate file saved from the non-reparameterized
force field XML file for the receptor.
prmtop_system_params: str, optional
Amber generated topology file saved from the reparameterized
force field XML file for the receptor.
inpcrd_system_params: str, optional
Amber generated coordinate file saved from the reparameterized
force field XML file for the receptor.
load_topology: str, optional
Argument to specify how to load the topology. Can either be "openmm"
or "parmed".
"""
def __init__(
self,
system_pdb="host.pdb",
system_sdf="host.sdf",
charge=0,
system_mol2="host.mol2",
system_in="host.in",
system_frcmod="host.frcmod",
prmtop_system="host.prmtop",
inpcrd_system="host.inpcrd",
system_leap="host.leap",
system_xml="host.xml",
sim_output="sim_output.pdb",
sim_steps=1000,
charge_parameter_file="host_qm_surround_charges.txt",
system_qm_pdb="host_qm.pdb",
bond_parameter_file="host_qm_bonds.txt",
angle_parameter_file="host_qm_angles.txt",
system_qm_params_file="host_qm_params.txt",
reparameterised_intermediate_system_xml_file="host_intermediate_reparameterised.xml",
system_xml_non_bonded_file="host_xml_non_bonded.txt",
system_xml_non_bonded_reparams_file="host_xml_non_bonded_reparams.txt",
reparameterised_system_xml_file="host_reparameterised.xml",
non_reparameterised_system_xml_file="host.xml",
prmtop_system_non_params="host_non_params.prmtop",
inpcrd_system_non_params="host_non_params.inpcrd",
prmtop_system_params="host_params.prmtop",
inpcrd_system_params="host_params.inpcrd",
load_topology="openmm",
):
self.system_pdb = system_pdb
self.system_sdf = system_sdf
self.charge = charge
self.system_mol2 = system_mol2
self.system_in = system_in
self.system_frcmod = system_frcmod
self.prmtop_system = prmtop_system
self.inpcrd_system = inpcrd_system
self.system_leap = system_leap
self.system_xml = system_xml
self.sim_output = sim_output
self.sim_steps = sim_steps
self.charge_parameter_file = charge_parameter_file
self.system_qm_pdb = system_qm_pdb
self.bond_parameter_file = bond_parameter_file
self.angle_parameter_file = angle_parameter_file
self.system_qm_params_file = system_qm_params_file
self.reparameterised_intermediate_system_xml_file = (
reparameterised_intermediate_system_xml_file
)
self.system_xml_non_bonded_file = system_xml_non_bonded_file
self.system_xml_non_bonded_reparams_file = (
system_xml_non_bonded_reparams_file
)
self.reparameterised_system_xml_file = reparameterised_system_xml_file
self.non_reparameterised_system_xml_file = (
non_reparameterised_system_xml_file
)
self.prmtop_system_non_params = prmtop_system_non_params
self.inpcrd_system_non_params = inpcrd_system_non_params
self.prmtop_system_params = prmtop_system_params
self.inpcrd_system_params = inpcrd_system_params
self.load_topology = load_topology
def generate_xml_from_pdb_sdf(self):
"""
Generates an XML forcefield file from the SDF file through
openforcefield.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -osdf " + self.system_sdf
"obabel -ipdb "
+ self.system_pdb
+ " -osdf -O "
+ self.system_sdf
)
os.system(command)
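        # With the default attribute values the command above expands to:
        #     obabel -ipdb host.pdb -osdf -O host.sdf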
# off_molecule = openforcefield.topology.Molecule(self.system_sdf)
off_molecule = Molecule(self.system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(self.system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def generate_xml_antechamber(self):
"""
Generates an XML forcefield file from the PDB file through antechamber.
"""
command = (
# "babel -ipdb " + self.system_pdb + " -omol2 " + self.system_mol2
"obabel -ipdb "
+ self.system_pdb
+ " -omol2 -O "
+ self.system_mol2
)
os.system(command)
command = (
"antechamber -i "
+ self.system_mol2
+ " -fi mol2 -o "
+ self.system_in
+ " -fo prepi -c bcc -nc "
+ str(self.charge)
)
os.system(command)
command = (
"parmchk2 -i "
+ self.system_in
+ " -o "
+ self.system_frcmod
+ " -f prepi -a Y"
)
os.system(command)
os.system(
"rm -rf ANTECHAMBER* leap.log sqm* ATOMTYPE.INF PREP.INF NEWPDB.PDB"
)
line_1 = "loadamberprep " + self.system_in
line_2 = "loadamberparams " + self.system_frcmod
line_3 = "pdb = loadpdb " + self.system_pdb
line_4 = (
"saveamberparm pdb "
+ self.prmtop_system
+ " "
+ self.inpcrd_system
)
line_5 = "quit"
with open(self.system_leap, "w") as f:
f.write(" " + "\n")
f.write(line_1 + "\n")
f.write(line_2 + "\n")
f.write(line_3 + "\n")
f.write(line_4 + "\n")
f.write(line_5 + "\n")
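        # With the default attribute values the generated leap input
        # (host.leap) reads, after a leading blank line:
        #
        #     loadamberprep host.in
        #     loadamberparams host.frcmod
        #     pdb = loadpdb host.pdb
        #     saveamberparm pdb host.prmtop host.inpcrd
        #     quit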
command = "tleap -f " + self.system_leap
os.system(command)
parm = parmed.load_file(self.prmtop_system, self.inpcrd_system)
system = parm.createSystem()
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
    def serialize_system(self):
        """
        Builds an OpenMM system for the receptor with the amber14-all.xml
        force field, minimizes it, runs a short MD simulation and serializes
        the system to the XML force field file.
        """
pdb = simtk.openmm.app.PDBFile(self.system_pdb)
forcefield = simtk.openmm.app.ForceField("amber14-all.xml")
system = forcefield.createSystem(pdb.topology)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
pdb.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
simulation.minimizeEnergy(maxIterations=100000)
state = simulation.context.getState(getEnergy=True)
energy = state.getPotentialEnergy()
print(energy)
simulation.reporters.append(
            simtk.openmm.app.PDBReporter(
                self.sim_output, int(self.sim_steps / 10)
            )
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
reportInterval=int(self.sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(self.sim_steps)
command = "rm -rf " + self.sim_output
os.system(command)
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def write_system_params(self):
"""
Saves the parameters obtained from the QM log files in a text file.
"""
# Charges from QM files
df_charges = pd.read_csv(
self.charge_parameter_file, header=None, delimiter=r"\s+"
)
df_charges.columns = ["atom", "charges"]
qm_charges = df_charges["charges"].values.tolist()
qm_charges = [round(num, 6) for num in qm_charges]
# print(qm_charges)
# Bond Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.system_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
atom_name_list = [i - 1 for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.bond_parameter_file, header=None, delimiter=r"\s+"
)
df.columns = [
"bond",
"k_bond",
"bond_length",
"bond_1",
"bond_2",
]
# print(df.head())
bond_1_list = df["bond_1"].values.tolist()
bond_1_list = [x - 1 + min(atom_name_list) for x in bond_1_list]
bond_2_list = df["bond_2"].values.tolist()
bond_2_list = [x - 1 + min(atom_name_list) for x in bond_2_list]
# print(bond_1_list)
# print(bond_2_list)
k_bond_list = df["k_bond"].values.tolist()
k_bond_list = [
i * KCAL_MOL_PER_KJ_MOL * ANGSTROMS_PER_NM**2 for i in k_bond_list
] # kcal/mol * A^2 to kJ/mol * nm^2
k_bond_list = [round(num, 10) for num in k_bond_list]
# print(k_bond_list)
bond_length_list = df["bond_length"].values.tolist()
bond_length_list = [i / 10.00 for i in bond_length_list]
bond_length_list = [round(num, 6) for num in bond_length_list]
# print(bond_length_list)
# Angle Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.system_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
atom_name_list = [i - 1 for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.angle_parameter_file, header=None, delimiter=r"\s+"
)
df.columns = [
"angle",
"k_angle",
"angle_degrees",
"angle_1",
"angle_2",
"angle_3",
]
# print(df.head())
angle_1_list = df["angle_1"].values.tolist()
angle_1_list = [x - 1 + min(atom_name_list) for x in angle_1_list]
# print(angle_1_list)
angle_2_list = df["angle_2"].values.tolist()
angle_2_list = [x - 1 + min(atom_name_list) for x in angle_2_list]
# print(angle_2_list)
angle_3_list = df["angle_3"].values.tolist()
angle_3_list = [x - 1 + min(atom_name_list) for x in angle_3_list]
# print(angle_3_list)
k_angle_list = df["k_angle"].values.tolist()
k_angle_list = [
i * 4.184 for i in k_angle_list
] # kcal/mol * radian^2 to kJ/mol * radian^2
k_angle_list = [round(num, 6) for num in k_angle_list]
# print(k_angle_list)
angle_list = df["angle_degrees"].values.tolist()
angle_list = [(i * math.pi) / 180.00 for i in angle_list]
angle_list = [round(num, 6) for num in angle_list]
# print(angle_list)
xml = open(self.system_qm_params_file, "w")
xml.write("Begin writing the Bond Parameters" + "\n")
for i in range(len(k_bond_list)):
xml.write(
" "
+ "<Bond"
+ " "
+ "d="
+ '"'
+ str(bond_length_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_bond_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(bond_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(bond_2_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Bond Parameters" + "\n")
xml.write("Begin writing the Angle Parameters" + "\n")
for i in range(len(k_angle_list)):
xml.write(
" "
+ "<Angle"
+ " "
+ "a="
+ '"'
+ str(angle_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_angle_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(angle_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(angle_2_list[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(angle_3_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Angle Parameters" + "\n")
xml.write("Begin writing the Charge Parameters" + "\n")
for i in range(len(qm_charges)):
xml.write(
"<Particle"
+ " "
+ "q="
+ '"'
+ str(qm_charges[i])
+ '"'
+ " "
+ "eps="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "sig="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "atom="
+ '"'
+ str(atom_name_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Charge Parameters" + "\n")
xml.close()
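        # The file written above therefore looks like the following
        # (numeric values are illustrative only):
        #
        #     Begin writing the Bond Parameters
        #      <Bond d="0.1523" k="251876.0" p1="0" p2="1"/>
        #     ...
        #     Finish writing the Bond Parameters
        #     Begin writing the Angle Parameters
        #      <Angle a="1.911136" k="418.4" p1="0" p2="1" p3="2"/>
        #     ...
        #     Finish writing the Angle Parameters
        #     Begin writing the Charge Parameters
        #     <Particle q="-0.123456" eps="0.0" sig="0.0" atom="0"/>
        #     ...
        #     Finish writing the Charge Parameters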
def write_reparameterised_system_xml(self):
"""
        Writes a reparameterised XML force field file for the receptor.
"""
# Bond Parameters
f_params = open(self.system_qm_params_file, "r")
lines_params = f_params.readlines()
# Bond Parameters
for i in range(len(lines_params)):
if "Begin writing the Bond Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params[i]:
to_end = int(i)
bond_params = lines_params[to_begin + 1 : to_end]
index_search_replace_bond = []
for i in bond_params:
bond_line_to_replace = i
# print(bond_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_bond = [comb_1, comb_2]
# print(comb_list_bond)
list_search_bond = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
]
# print(list_search_bond)
for j in range(len(list_search_bond)):
if list_search_bond[j] != []:
to_add = (list_search_bond[j], i)
# print(to_add)
index_search_replace_bond.append(to_add)
# Angle Parameters
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
        index_search_replace_angle = []
        for i in angle_params:
            angle_line_to_replace = i
            # print(angle_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
re.findall("\d*\.?\d+", i)[7],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_3 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_4 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_5 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_6 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_angle = [
comb_1,
comb_2,
comb_3,
comb_4,
comb_5,
comb_6,
]
# print(comb_list_angle)
list_search_angle = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
search_in_file(file=self.system_xml, word=comb_3),
search_in_file(file=self.system_xml, word=comb_4),
search_in_file(file=self.system_xml, word=comb_5),
search_in_file(file=self.system_xml, word=comb_6),
]
# print(list_search_angle)
for j in range(len(list_search_angle)):
if list_search_angle[j] != []:
to_add = (list_search_angle[j], i)
# print(to_add)
index_search_replace_angle.append(to_add)
f_org = open(self.system_xml)
lines = f_org.readlines()
for i in range(len(index_search_replace_bond)):
line_number = index_search_replace_bond[i][0][0][0] - 1
line_to_replace = index_search_replace_bond[i][0][0][1]
line_to_replace_with = index_search_replace_bond[i][1]
lines[line_number] = line_to_replace_with
for i in range(len(index_search_replace_angle)):
line_number = index_search_replace_angle[i][0][0][0] - 1
line_to_replace = index_search_replace_angle[i][0][0][1]
line_to_replace_with = index_search_replace_angle[i][1]
lines[line_number] = line_to_replace_with
f_cop = open(self.reparameterised_intermediate_system_xml_file, "w")
for i in lines:
f_cop.write(i)
f_cop.close()
f_params = open(self.system_qm_params_file)
lines_params = f_params.readlines()
# Charge Parameters
for i in range(len(lines_params)):
if "Begin writing the Charge Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Charge Parameters" in lines_params[i]:
to_end = int(i)
charge_params = lines_params[to_begin + 1 : to_end]
non_bonded_index = []
for k in charge_params:
non_bonded_index.append(int(re.findall("[-+]?\d*\.\d+|\d+", k)[3]))
charge_for_index = []
for k in charge_params:
charge_for_index.append(
float(re.findall("[-+]?\d*\.\d+|\d+", k)[0])
)
xml_off = open(self.system_xml)
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
nonbond_params = xml_off_lines[to_begin + 4 : to_end - 1]
# print(len(nonbond_params))
f_non_bonded = open(self.system_xml_non_bonded_file, "w")
for x in nonbond_params:
f_non_bonded.write(x)
f_non_bonded = open(self.system_xml_non_bonded_file)
lines_non_bonded = f_non_bonded.readlines()
# print(len(lines_non_bonded))
lines_non_bonded_to_write = []
for i in range(len(non_bonded_index)):
line_ = lines_non_bonded[non_bonded_index[i]]
# print(line_)
eps = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[0])
sig = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[2])
line_to_replace = (
" "
+ "<Particle "
+ "eps="
+ '"'
+ str(eps)
+ '"'
+ " "
+ "q="
+ '"'
+ str(charge_for_index[i])
+ '"'
+ " "
+ "sig="
+ '"'
+ str(sig)
+ '"'
+ "/>"
)
lines_non_bonded_to_write.append(line_to_replace)
data_ = list(zip(non_bonded_index, lines_non_bonded_to_write))
df_non_bonded_params = pd.DataFrame(
data_, columns=["line_index", "line"]
)
# print(df_non_bonded_params.head())
f_non_bonded_ = open(self.system_xml_non_bonded_file)
lines_non_bonded_ = f_non_bonded_.readlines()
for i in range(len(lines_non_bonded_)):
if i in non_bonded_index:
lines_non_bonded_[i] = (
df_non_bonded_params.loc[
df_non_bonded_params.line_index == i, "line"
].values[0]
) + "\n"
# print(len(lines_non_bonded_))
f_write_non_bonded_reparams = open(
self.system_xml_non_bonded_reparams_file, "w"
)
for p in range(len(lines_non_bonded_)):
f_write_non_bonded_reparams.write(lines_non_bonded_[p])
f_write_non_bonded_reparams.close()
f_ = open(self.system_xml_non_bonded_reparams_file)
lines_ = f_.readlines()
print(len(lines_) == len(lines_non_bonded))
xml_off = open(self.reparameterised_intermediate_system_xml_file)
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
lines_before_params = xml_off_lines[: to_begin + 4]
f__ = open(self.system_xml_non_bonded_reparams_file)
lines_params_non_bonded = f__.readlines()
lines_after_params = xml_off_lines[to_end - 1 :]
f_reparams_xml = open(self.reparameterised_system_xml_file, "w")
for x in lines_before_params:
f_reparams_xml.write(x)
for x in lines_params_non_bonded:
f_reparams_xml.write(x)
for x in lines_after_params:
f_reparams_xml.write(x)
f_reparams_xml.close()
def save_amber_params(self):
"""
        Saves the Amber-generated topology and coordinate files for the
        receptor.
"""
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
openmm_system.save(self.prmtop_system_non_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_non_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_non_params, self.inpcrd_system_non_params,
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_non_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_non_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(self.reparameterised_system_xml_file),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(self.reparameterised_system_xml_file),
)
openmm_system.save(self.prmtop_system_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_params, self.inpcrd_system_params
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.reparameterised_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
def analyze_diff_energies(self):
"""
        Compares the energies of the receptor obtained from the
        non-reparameterized and the reparameterized force field files.
"""
parm_non_params = parmed.load_file(
self.prmtop_system_non_params, self.inpcrd_system_non_params,
)
prmtop_energy_decomposition_non_params = parmed.openmm.energy_decomposition_system(
parm_non_params, parm_non_params.createSystem()
)
prmtop_energy_decomposition_non_params_value = [
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_non_params
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_non_params
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_non_params
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_non_params
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_non_params_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_non_params = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_non_params_list,
prmtop_energy_decomposition_non_params_value,
)
),
columns=["Energy_term", "Energy_parm_non_params"],
)
df_energy_non_params = df_energy_non_params.set_index("Energy_term")
# print(df_energy_non_params)
parm_params = parmed.load_file(
self.prmtop_system_params, self.inpcrd_system_params
)
prmtop_energy_decomposition_params = parmed.openmm.energy_decomposition_system(
parm_params, parm_params.createSystem()
)
prmtop_energy_decomposition_params_value = [
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_params
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_params
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_params
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem)
for elem in prmtop_energy_decomposition_params
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_params_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_params = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_params_list,
prmtop_energy_decomposition_params_value,
)
),
columns=["Energy_term", "Energy_parm_params"],
)
df_energy_params = df_energy_params.set_index("Energy_term")
# print(df_energy_params)
df_compare = pd.concat(
[df_energy_non_params, df_energy_params], axis=1
)
df_compare["Energy_difference"] = df_compare[
"Energy_parm_non_params"
].sub(df_compare["Energy_parm_params"], axis=0)
print(df_compare)
class RunOpenMMSims:
"""
A class used to run the OpenMM simulation on any specified system.
    This class contains methods to run an MD simulation to verify the
    structural integrity of the reparameterized force field files.
...
Attributes
----------
system_prmtop : str
Topology file of the system (receptor, ligand or
receptor - ligand complex)
system_inpcrd : str
Coordinate file of the system (receptor, ligand or
receptor - ligand complex)
system_pdb: str
PDB file of the system to run MD simulation (receptor,
ligand or receptor - ligand complex).
system_xml: str
Serialised XML file for the system.
    system_output: str, optional
PDB file containing the trajectory coordinates for the OpenMM
simulation.
    sim_steps: int, optional
Number of steps in the OpenMM MD simulation.
"""
def __init__(
self,
system_prmtop,
system_inpcrd,
system_pdb,
system_xml,
system_output="sim_output.pdb",
sim_steps=1000,
):
self.system_prmtop = system_prmtop
self.system_inpcrd = system_inpcrd
self.system_pdb = system_pdb
self.system_xml = system_xml
self.system_output = system_output
self.sim_steps = sim_steps
def run_openmm_prmtop_inpcrd(self):
"""
Runs OpenMM MD simulation with prmtop and inpcrd file.
"""
print(
"Running OpenMM simulation for "
+ self.system_prmtop
+ " and "
+ self.system_inpcrd
)
prmtop = simtk.openmm.app.AmberPrmtopFile(self.system_prmtop)
inpcrd = simtk.openmm.app.AmberInpcrdFile(self.system_inpcrd)
system = prmtop.createSystem()
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
simulation.minimizeEnergy(maxIterations=100000)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(
                self.system_output, int(self.sim_steps / 10)
)
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
reportInterval=int(self.sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(self.sim_steps)
command = "rm -rf " + self.system_output
os.system(command)
def run_openmm_prmtop_pdb(self):
"""
Runs OpenMM MD simulation with prmtop and PDB file.
"""
print(
"Running OpenMM simulation for "
+ self.system_prmtop
+ " and "
+ self.system_pdb
)
pdb = simtk.openmm.app.PDBFile(self.system_pdb)
prmtop = simtk.openmm.app.AmberPrmtopFile(self.system_prmtop)
system = prmtop.createSystem()
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
simulation.minimizeEnergy(maxIterations=100000)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(
                self.system_output, int(self.sim_steps / 10)
)
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
reportInterval=int(self.sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(self.sim_steps)
command = "rm -rf " + self.system_output
os.system(command)
def run_openmm_xml_pdb(self):
"""
Runs OpenMM MD simulation with XML and PDB file.
"""
print(
"Running OpenMM simulation for "
+ self.system_xml
+ " and "
+ self.system_pdb
)
pdb = simtk.openmm.app.PDBFile(self.system_pdb)
ff_xml_file = open(self.system_xml, "r")
system = simtk.openmm.XmlSerializer.deserialize(ff_xml_file.read())
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
pdb.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
simulation.minimizeEnergy(maxIterations=100000)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(
                self.system_output, int(self.sim_steps / 10)
)
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
reportInterval=int(self.sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(self.sim_steps)
command = "rm -rf " + self.system_output
os.system(command)
class MergeHostGuestTopology:
"""
A class used to merge the host and guest topology and coordinate
files.
...
Attributes
----------
host_prmtop : str
Topology file of the receptor.
guest_prmtop : str
Topology file of the ligand.
host_inpcrd : str
Coordinate file of the receptor.
guest_inpcrd : str
Coordinate file of the ligand.
system_prmtop : str
Topology file of the receptor - ligand complex.
system_inpcrd : str
Coordinate file of the receptor - ligand complex.
"""
def __init__(
self,
host_prmtop,
guest_prmtop,
host_inpcrd,
guest_inpcrd,
system_prmtop,
system_inpcrd,
):
self.host_prmtop = host_prmtop
self.guest_prmtop = guest_prmtop
self.host_inpcrd = host_inpcrd
self.guest_inpcrd = guest_inpcrd
self.system_prmtop = system_prmtop
self.system_inpcrd = system_inpcrd
def merge_topology_files(self):
"""
Merge the host and guest topology and coordinate files.
"""
print(
"Merging the "
+ self.host_prmtop
+ " "
+ self.guest_prmtop
+ " files"
)
print(
"Merging the "
+ self.host_inpcrd
+ " "
+ self.guest_inpcrd
+ " files"
)
host_system = parmed.load_file(self.host_prmtop, xyz=self.host_inpcrd)
guest_system = parmed.load_file(
self.guest_prmtop, xyz=self.guest_inpcrd
)
system = host_system + guest_system
system.save(self.system_prmtop, overwrite=True)
system.save(self.system_inpcrd, overwrite=True)
class TorsionDriveSims:
"""
    A class used to create a file tree for torsion scans of the
    ligand's dihedral angles using torsiondrive.
This class creates a directory for carrying out torsiondrive
calculations followed by fitting of torsional parameters. Methods
in this class are used to run torsiondrive calculations either for
    all of the torsional angles, or only for those defined by
    non-hydrogen / heavy atoms.
...
Attributes
----------
charge : int, optional
Charge of the ligand.
multiplicity: int, optional
Spin Multiplicity (2S+1) of the ligand where S represents
the total spin of the ligand.
reparameterised_system_xml_file : str, optional
        Reparameterised XML force field file for the ligand.
torsion_xml_file : str, optional
A text file containing torsional parameters from
reparameterised XML file.
xyz_file : str, optional
XYZ file containing the coordinates of the guest molecule.
psi_input_file : str, optional
Input file for psi4 QM engine.
memory : int, optional
Memory (in GB) to be used.
basis_set: str, optional
Basis set to use for the QM engine.
functional: str, optional
Exchange/Correlation or hybrid Functional for the QM engine.
iterations : int, optional
Maximum number of geometry optimization steps.
method_torsion_drive : str, optional
The algorithm/package to use while running the torsiondrive
        scan. Using --native_opt runs the QM program's native constrained
        optimization algorithm and turns off the geomeTRIC package.
system_bonds_file : str, optional
Text file containing bond parameters for the ligand.
tor_dir : str, optional
Torsiondrive directory containing separate torsiondrive
folders, each containing files for a separate torsiondrive
calculation for a particular dihedral angle.
dihedral_text_file : str, optional
Dihedral information file for torsiondrive.
template_pdb : str, optional
Guest PDB with atoms beginning from 1 to be used as a
template PDB to retrieve atom indices and symbols.
torsion_drive_run_file : str, optional
        Bash file for running the torsiondrive calculations.
dihedral_interval : int, optional
        Grid spacing for the dihedral scan, i.e. the scan is performed
        every n degrees (where n is an integer); multiple grid values
        are mapped to each dihedral angle.
engine : str, optional
Engine for running torsiondrive scan.
energy_threshold : float, optional
Only activate grid points if the new optimization is lower than
the previous lowest energy (in a.u.).
"""
def __init__(
self,
charge=0,
multiplicity=1,
reparameterised_system_xml_file="guest_reparameterised.xml",
torsion_xml_file="guest_torsion_xml.txt",
xyz_file="guest_coords.xyz",
psi_input_file="torsion_drive_input.dat",
memory=50,
basis_set="6-31G",
functional="B3LYP",
iterations=2000,
method_torsion_drive="native_opt",
system_bonds_file="guest_bonds.txt",
tor_dir="torsion_dir",
dihedral_text_file="dihedrals.txt",
template_pdb="guest_init_II.pdb",
torsion_drive_run_file="run_command",
dihedral_interval=15,
engine="psi4",
energy_threshold=0.00001,
):
self.charge = charge
self.multiplicity = multiplicity
self.reparameterised_system_xml_file = reparameterised_system_xml_file
self.torsion_xml_file = torsion_xml_file
self.xyz_file = xyz_file
self.psi_input_file = psi_input_file
self.memory = memory
self.basis_set = basis_set
self.functional = functional
self.iterations = iterations
self.method_torsion_drive = method_torsion_drive
self.system_bonds_file = system_bonds_file
self.tor_dir = tor_dir
self.dihedral_text_file = dihedral_text_file
self.template_pdb = template_pdb
self.torsion_drive_run_file = torsion_drive_run_file
self.dihedral_interval = dihedral_interval
self.engine = engine
self.energy_threshold = energy_threshold
def write_torsion_drive_run_file(self):
"""
Saves a bash file for running torsion scans for torsiondrive.
"""
if self.method_torsion_drive == "geometric":
torsion_command = (
"torsiondrive-launch"
+ " "
+ self.psi_input_file
+ " "
+ self.dihedral_text_file
+ " "
+ "-g"
+ " "
+ str(self.dihedral_interval)
+ " "
+ "-e"
+ " "
+ self.engine
+ " "
+ "--energy_thresh"
+ " "
+ str(self.energy_threshold)
+ " "
+ "-v"
)
if self.method_torsion_drive == "native_opt":
torsion_command = (
"torsiondrive-launch"
+ " "
+ self.psi_input_file
+ " "
+ self.dihedral_text_file
+ " "
+ "-g"
+ " "
+ str(self.dihedral_interval)
+ " "
+ "-e"
+ " "
+ self.engine
+ " "
+ "--energy_thresh"
+ " "
+ str(self.energy_threshold)
+ " "
+ "--"
+ self.method_torsion_drive
+ " "
+ "-v"
)
print(torsion_command)
with open(self.torsion_drive_run_file, "w") as f:
f.write(torsion_command)
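    # NOTE: with the default attributes and method_torsion_drive="geometric",
    # write_torsion_drive_run_file() writes a command resembling
    # (illustrative):
    #
    #     torsiondrive-launch torsion_drive_input.dat dihedrals.txt -g 15 -e psi4 --energy_thresh 1e-05 -v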
def write_tor_params_txt(self):
"""
Saves a text file containing torsional parameters from the reparameterized XML
force field file.
"""
xml_off = open(self.reparameterised_system_xml_file, "r")
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<Torsions>" in xml_off_lines[i]:
to_begin = int(i)
if "</Torsions>" in xml_off_lines[i]:
to_end = int(i)
torsion_params = xml_off_lines[to_begin + 1 : to_end]
k_list_off = []
for i in range(len(torsion_params)):
k_list_off.append(
float(re.findall("\d*\.?\d+", torsion_params[i])[0])
)
k_list_off = [round(num, 10) for num in k_list_off]
# print(k_list_off)
p1 = []
for i in range(len(torsion_params)):
p1.append(int(re.findall("\d*\.?\d+", torsion_params[i])[2]))
p1 = [i + 1 for i in p1]
# print(p1)
p2 = []
for i in range(len(torsion_params)):
p2.append(int(re.findall("\d*\.?\d+", torsion_params[i])[4]))
p2 = [i + 1 for i in p2]
# print(p2)
p3 = []
for i in range(len(torsion_params)):
p3.append(int(re.findall("\d*\.?\d+", torsion_params[i])[6]))
p3 = [i + 1 for i in p3]
# print(p3)
p4 = []
for i in range(len(torsion_params)):
p4.append(int(re.findall("\d*\.?\d+", torsion_params[i])[8]))
p4 = [i + 1 for i in p4]
# print(p4)
periodicity = []
for i in range(len(torsion_params)):
periodicity.append(
int(re.findall("\d*\.?\d+", torsion_params[i])[9])
)
# print(periodicity)
phase = []
for i in range(len(torsion_params)):
phase.append(float(re.findall("\d*\.?\d+", torsion_params[i])[10]))
phase = [round(num, 8) for num in phase]
# print(phase)
data_tuples = list(zip(k_list_off, p1, p2, p3, p4, periodicity, phase))
df_tor = pd.DataFrame(
data_tuples,
columns=["k", "p1", "p2", "p3", "p4", "periodicity", "phase",],
)
# print(df_tor.head())
df_tor.to_csv(
self.torsion_xml_file, index=False, header=False, sep=" "
)
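    # NOTE: the regex indexing in write_tor_params_txt() assumes each line
    # between the <Torsions> and </Torsions> tags looks roughly like
    # (illustrative values and indices):
    #
    #     <Torsion k="0.6276" p1="0" p2="1" p3="4" p4="6" periodicity="3" phase="0.0"/>
    #
    # and writes one space-separated row per torsion with the columns
    # k p1 p2 p3 p4 periodicity phase (atom indices shifted to 1-based).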
def write_psi4_input(self):
"""
Writes a psi4 input QM file.
"""
xyz_lines = open(self.xyz_file, "r").readlines()[2:]
with open(self.psi_input_file, "w") as f:
f.write("memory" + " " + str(self.memory) + " " + "GB" + "\n")
f.write("molecule" + " " + "{" + "\n")
f.write(str(self.charge) + " " + str(self.multiplicity) + "\n")
for line in xyz_lines:
f.write(line)
f.write("}" + "\n")
f.write("set" + " " + "{" + "\n")
f.write("basis" + " " + self.basis_set + "\n")
if self.method_torsion_drive == "native_opt":
f.write("GEOM_MAXITER" + " " + str(self.iterations) + "\n")
f.write("}" + "\n")
if self.method_torsion_drive == "native_opt":
f.write(
"optimize" + "(" + "'" + self.functional + "'" ")" + "\n"
)
if self.method_torsion_drive == "geometric":
f.write(
"gradient" + "(" + "'" + self.functional + "'" ")" + "\n"
)
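    # NOTE: with the defaults and method_torsion_drive="native_opt",
    # write_psi4_input() produces an input of roughly this shape
    # (illustrative; the atom lines come from the guest XYZ file):
    #
    #     memory 50 GB
    #     molecule {
    #     0 1
    #     C   0.000   0.000   0.000
    #     ...
    #     }
    #     set {
    #     basis 6-31G
    #     GEOM_MAXITER 2000
    #     }
    #     optimize('B3LYP')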
def create_torsion_drive_dir(self):
"""
Creates a directory for carrying out torsiondrive
calculations for all the proper dihedral angles.
"""
df_tor = pd.read_csv(
self.torsion_xml_file, header=None, delimiter=r"\s+"
)
df_tor.columns = [
"k",
"p1",
"p2",
"p3",
"p4",
"periodicity",
"phase",
]
# print(df_tor.head())
df_dihedrals = df_tor[["p1", "p2", "p3", "p4"]]
# print(df_dihedrals.head())
dihedrals_list_list = []
for i in range(len(df_dihedrals)):
dihedrals_list_list.append(df_dihedrals.iloc[i].values.tolist())
set_list = set()
unique_dihedrals_list_list = []
for x in dihedrals_list_list:
srtd = tuple(sorted(x))
if srtd not in set_list:
unique_dihedrals_list_list.append(x)
set_list.add(srtd)
# print(unique_dihedrals_list_list)
os.system("rm -rf " + self.tor_dir)
os.system("mkdir " + self.tor_dir)
parent_cwd = os.getcwd()
shutil.copy(
parent_cwd + "/" + self.psi_input_file,
parent_cwd + "/" + self.tor_dir + "/" + self.psi_input_file,
)
shutil.copy(
parent_cwd + "/" + self.template_pdb,
parent_cwd + "/" + self.tor_dir + "/" + self.template_pdb,
)
shutil.copy(
parent_cwd + "/" + self.torsion_drive_run_file,
parent_cwd
+ "/"
+ self.tor_dir
+ "/"
+ self.torsion_drive_run_file,
)
os.chdir(parent_cwd + "/" + self.tor_dir)
torsion_drive_dir = os.getcwd()
for i in range(len(unique_dihedrals_list_list)):
dir_name = "torsion_drive" + "_" + str(i)
os.system("rm -rf " + dir_name)
os.system("mkdir " + dir_name)
os.chdir(torsion_drive_dir + "/" + dir_name)
with open(self.dihedral_text_file, "w") as f:
f.write(
"# dihedral definition by atom indices starting from 1"
+ "\n"
)
f.write("# i j k l" + "\n")
i_ = unique_dihedrals_list_list[i][0]
j_ = unique_dihedrals_list_list[i][1]
k_ = unique_dihedrals_list_list[i][2]
l_ = unique_dihedrals_list_list[i][3]
f.write(
" "
+ "{:< 6d}".format(i_)
+ "{:< 6d}".format(j_)
+ "{:< 6d}".format(k_)
+ "{:< 6d}".format(l_)
+ "\n"
)
shutil.copy(
torsion_drive_dir + "/" + self.psi_input_file,
torsion_drive_dir
+ "/"
+ dir_name
+ "/"
+ self.psi_input_file,
)
shutil.copy(
torsion_drive_dir + "/" + self.template_pdb,
torsion_drive_dir
+ "/"
+ dir_name
+ "/"
+ self.template_pdb,
)
shutil.copy(
torsion_drive_dir + "/" + self.torsion_drive_run_file,
torsion_drive_dir
+ "/"
+ dir_name
+ "/"
+ self.torsion_drive_run_file,
)
os.chdir(torsion_drive_dir)
os.system("rm -rf " + self.psi_input_file)
os.system("rm -rf " + self.template_pdb)
os.system("rm -rf " + self.torsion_drive_run_file)
os.chdir(parent_cwd)
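    # NOTE: create_torsion_drive_dir() builds a tree such as
    # torsion_dir/torsion_drive_0, torsion_dir/torsion_drive_1, ..., each
    # holding a copy of the psi4 input, the template PDB, the run script
    # and a dihedrals.txt of the form (illustrative indices):
    #
    #     # dihedral definition by atom indices starting from 1
    #     # i     j     k     l
    #      1     2     5     7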
def create_non_H_torsion_drive_dir(self):
"""
Creates a directory for carrying out torsiondrive
calculations for all non-hydrogen torsional angles.
"""
df_tor = pd.read_csv(
self.torsion_xml_file, header=None, delimiter=r"\s+"
)
df_tor.columns = [
"k",
"p1",
"p2",
"p3",
"p4",
"periodicity",
"phase",
]
# print(df_tor.head())
ppdb = PandasPdb()
ppdb.read_pdb(self.template_pdb)
df_index_symbol = ppdb.df["ATOM"][["atom_number", "element_symbol"]]
# print(df_index_symbol.head())
df_dihedrals = df_tor[["p1", "p2", "p3", "p4"]]
# print(df_dihedrals.head())
dihedrals_list_list = []
for i in range(len(df_dihedrals)):
dihedrals_list_list.append(df_dihedrals.iloc[i].values.tolist())
set_list = set()
unique_dihedrals_list_list = []
for x in dihedrals_list_list:
srtd = tuple(sorted(x))
if srtd not in set_list:
unique_dihedrals_list_list.append(x)
set_list.add(srtd)
# print(unique_dihedrals_list_list)
atom_dihedral_list = []
for sub_list in unique_dihedrals_list_list:
atom_dihedral_list.append(
[
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[0]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[1]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[2]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[3]
]["element_symbol"].to_list()[0],
]
)
# print(atom_dihedral_list)
index_to_include = []
for i in range(len(atom_dihedral_list)):
if "H" not in atom_dihedral_list[i]:
index_to_include.append(i)
non_H_dihedrals = []
for i in index_to_include:
non_H_dihedrals.append(unique_dihedrals_list_list[i])
# print(non_H_dihedrals)
non_H_atom_dihedral_list = []
for sub_list in non_H_dihedrals:
non_H_atom_dihedral_list.append(
[
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[0]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[1]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[2]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[3]
]["element_symbol"].to_list()[0],
]
)
print(non_H_atom_dihedral_list)
os.system("rm -rf " + self.tor_dir)
os.system("mkdir " + self.tor_dir)
parent_cwd = os.getcwd()
shutil.copy(
parent_cwd + "/" + self.psi_input_file,
parent_cwd + "/" + self.tor_dir + "/" + self.psi_input_file,
)
shutil.copy(
parent_cwd + "/" + self.template_pdb,
parent_cwd + "/" + self.tor_dir + "/" + self.template_pdb,
)
shutil.copy(
parent_cwd + "/" + self.torsion_drive_run_file,
parent_cwd
+ "/"
+ self.tor_dir
+ "/"
+ self.torsion_drive_run_file,
)
os.chdir(parent_cwd + "/" + self.tor_dir)
torsion_drive_dir = os.getcwd()
for i in range(len(non_H_dihedrals)):
dir_name = "torsion_drive" + "_" + str(i)
os.system("rm -rf " + dir_name)
os.system("mkdir " + dir_name)
os.chdir(torsion_drive_dir + "/" + dir_name)
with open(self.dihedral_text_file, "w") as f:
f.write(
"# dihedral definition by atom indices starting from 1"
+ "\n"
)
f.write("# i j k l" + "\n")
i_ = non_H_dihedrals[i][0]
j_ = non_H_dihedrals[i][1]
k_ = non_H_dihedrals[i][2]
l_ = non_H_dihedrals[i][3]
f.write(
" "
+ "{:< 6d}".format(i_)
+ "{:< 6d}".format(j_)
+ "{:< 6d}".format(k_)
+ "{:< 6d}".format(l_)
+ "\n"
)
shutil.copy(
torsion_drive_dir + "/" + self.psi_input_file,
torsion_drive_dir
+ "/"
+ dir_name
+ "/"
+ self.psi_input_file,
)
shutil.copy(
torsion_drive_dir + "/" + self.template_pdb,
torsion_drive_dir
+ "/"
+ dir_name
+ "/"
+ self.template_pdb,
)
shutil.copy(
torsion_drive_dir + "/" + self.torsion_drive_run_file,
torsion_drive_dir
+ "/"
+ dir_name
+ "/"
+ self.torsion_drive_run_file,
)
os.chdir(torsion_drive_dir)
os.system("rm -rf " + self.psi_input_file)
os.system("rm -rf " + self.template_pdb)
os.system("rm -rf " + self.torsion_drive_run_file)
os.chdir(parent_cwd)
def create_non_H_bonded_torsion_drive_dir(self):
"""
Creates a directory for carrying out torsiondrive
calculations for all non-hydrogen bonded torsional angles.
"""
df_tor = pd.read_csv(
self.torsion_xml_file, header=None, delimiter=r"\s+"
)
df_tor.columns = [
"k",
"p1",
"p2",
"p3",
"p4",
"periodicity",
"phase",
]
# print(df_tor.head())
ppdb = PandasPdb()
ppdb.read_pdb(self.template_pdb)
df_index_symbol = ppdb.df["ATOM"][["atom_number", "element_symbol"]]
# print(df_index_symbol.head())
df_dihedrals = df_tor[["p1", "p2", "p3", "p4"]]
# print(df_dihedrals.head())
dihedrals_list_list = []
for i in range(len(df_dihedrals)):
dihedrals_list_list.append(df_dihedrals.iloc[i].values.tolist())
set_list = set()
unique_dihedrals_list_list = []
for x in dihedrals_list_list:
srtd = tuple(sorted(x))
if srtd not in set_list:
unique_dihedrals_list_list.append(x)
set_list.add(srtd)
# print(unique_dihedrals_list_list)
atom_dihedral_list = []
for sub_list in unique_dihedrals_list_list:
atom_dihedral_list.append(
[
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[0]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[1]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[2]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[3]
]["element_symbol"].to_list()[0],
]
)
# print(atom_dihedral_list)
index_to_include = []
for i in range(len(atom_dihedral_list)):
if "H" not in atom_dihedral_list[i]:
index_to_include.append(i)
non_H_dihedrals = []
for i in index_to_include:
non_H_dihedrals.append(unique_dihedrals_list_list[i])
# print(non_H_dihedrals)
non_H_atom_dihedral_list = []
for sub_list in non_H_dihedrals:
non_H_atom_dihedral_list.append(
[
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[0]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[1]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[2]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[3]
]["element_symbol"].to_list()[0],
]
)
# print(non_H_atom_dihedral_list)
df_bonds_all = pd.read_csv(
self.system_bonds_file, header=None, delimiter=r"\s+"
)
df_bonds_all.columns = [
"bond_names",
"k",
"angle",
"b1",
"b2",
]
df_bonds = df_bonds_all[["b1", "b2"]]
bonds_list_list = []
for i in range(len(df_bonds)):
bonds_list_list.append(df_bonds.iloc[i].values.tolist())
# print(bonds_list_list)
reverse_bond_list_list = []
for i in bonds_list_list:
reverse_bond_list_list.append(reverse_list(i))
# print(reverse_bond_list_list)
bond_list = bonds_list_list + reverse_bond_list_list
# print(bond_list)
non_H_dihedral_bonds_list = []
for i in non_H_dihedrals:
non_H_dihedral_bonds_list.append(
[[i[0], i[1]], [i[1], i[2]], [i[2], i[3]]]
)
# print(non_H_dihedral_bonds_list)
bonded_index_to_include = []
for i in range(len(non_H_dihedral_bonds_list)):
if [
non_H_dihedral_bonds_list[i][0] in bond_list,
non_H_dihedral_bonds_list[i][1] in bond_list,
non_H_dihedral_bonds_list[i][2] in bond_list,
] == [True, True, True]:
bonded_index_to_include.append(i)
# print(bonded_index_to_include)
non_H_bonded_dihedrals = []
for i in bonded_index_to_include:
non_H_bonded_dihedrals.append(non_H_dihedrals[i])
# print(non_H_bonded_dihedrals)
non_H_bonded_atom_dihedral_list = []
for sub_list in non_H_bonded_dihedrals:
non_H_bonded_atom_dihedral_list.append(
[
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[0]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[1]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[2]
]["element_symbol"].to_list()[0],
df_index_symbol.loc[
df_index_symbol["atom_number"] == sub_list[3]
]["element_symbol"].to_list()[0],
]
)
print(non_H_bonded_atom_dihedral_list)
os.system("rm -rf " + self.tor_dir)
os.system("mkdir " + self.tor_dir)
parent_cwd = os.getcwd()
shutil.copy(
parent_cwd + "/" + self.psi_input_file,
parent_cwd + "/" + self.tor_dir + "/" + self.psi_input_file,
)
shutil.copy(
parent_cwd + "/" + self.template_pdb,
parent_cwd + "/" + self.tor_dir + "/" + self.template_pdb,
)
shutil.copy(
parent_cwd + "/" + self.torsion_drive_run_file,
parent_cwd
+ "/"
+ self.tor_dir
+ "/"
+ self.torsion_drive_run_file,
)
os.chdir(parent_cwd + "/" + self.tor_dir)
torsion_drive_dir = os.getcwd()
for i in range(len(non_H_bonded_dihedrals)):
dir_name = "torsion_drive" + "_" + str(i)
os.system("rm -rf " + dir_name)
os.system("mkdir " + dir_name)
os.chdir(torsion_drive_dir + "/" + dir_name)
with open(self.dihedral_text_file, "w") as f:
f.write(
"# dihedral definition by atom indices starting from 1"
+ "\n"
)
f.write("# i j k l" + "\n")
i_ = non_H_bonded_dihedrals[i][0]
j_ = non_H_bonded_dihedrals[i][1]
k_ = non_H_bonded_dihedrals[i][2]
l_ = non_H_bonded_dihedrals[i][3]
f.write(
" "
+ "{:< 6d}".format(i_)
+ "{:< 6d}".format(j_)
+ "{:< 6d}".format(k_)
+ "{:< 6d}".format(l_)
+ "\n"
)
shutil.copy(
torsion_drive_dir + "/" + self.psi_input_file,
torsion_drive_dir
+ "/"
+ dir_name
+ "/"
+ self.psi_input_file,
)
shutil.copy(
torsion_drive_dir + "/" + self.template_pdb,
torsion_drive_dir
+ "/"
+ dir_name
+ "/"
+ self.template_pdb,
)
shutil.copy(
torsion_drive_dir + "/" + self.torsion_drive_run_file,
torsion_drive_dir
+ "/"
+ dir_name
+ "/"
+ self.torsion_drive_run_file,
)
os.chdir(torsion_drive_dir)
os.system("rm -rf " + self.psi_input_file)
os.system("rm -rf " + self.template_pdb)
os.system("rm -rf " + self.torsion_drive_run_file)
os.chdir(parent_cwd)
def run_torsion_sim(self):
"""
Run torsion scans using torsiondrive locally.
"""
parent_cwd = os.getcwd()
target_dir = parent_cwd + "/" + self.tor_dir
num_folders = 0
for _, dirnames, filenames in os.walk(target_dir):
num_folders += len(dirnames)
for i in range(num_folders):
dir_ = "torsion_drive" + "_" + str(i)
os.chdir(parent_cwd + "/" + self.tor_dir + "/" + dir_)
run_command = "bash" + " " + self.torsion_drive_run_file
# os.system(run_command)
print(run_command)
os.chdir(parent_cwd)
class TorsionDriveParams:
"""
A class used to parameterize the torsional parameters
of the ligand by fitting the torsional parameters obtained
from torsiondrive calculations.
    The previously obtained reparameterized XML forcefield file does
    not contain the torsional parameters obtained from QM calculations.
    The torsional parameters obtained from the torsiondrive scans are
    fitted and a new XML forcefield file is generated.
...
Attributes
----------
num_charge_atoms : int, optional
Number of charged atoms in the molecule.
index_charge_atom_1: int, optional
Index of the first charged atom.
charge_atom_1 : int, optional
Charge on the first charged atom.
tor_dir : str, optional
Torsiondrive directory containing separate torsiondrive folders,
each containing files for a separate torsiondrive calculation
for a particular dihedral angle.
reparameterized_torsional_params_file : str, optional
Text file containing the forcefield parameters for the
ligand previously obtained without torsional reparameterization.
psi_input_file : str, optional
Input file for psi4 QM engine.
xyz_file : str, optional
XYZ file for ligand coordinates.
coords_file : str, optional
Text file containing the XYZ coordinates of the ligand.
template_pdb: str, optional
Ligand PDB with atoms beginning from 1 to be used as a template PDB
to retrieve atom indices and symbols.
system_pdb: str, optional
        PDB file for the torsiondrive torsion scans.
system_sdf : str, optional
        SDF (structure-data format) file for the ligand.
system_xml : str, optional
XML force field file for the ligand.
qm_scan_file : str, optional
Output scan file for the torsiondrive scans.
load_topology : str, optional
Argument to specify how to load the topology. Can either
be "openmm" or "parmed".
method : str, optional
Minimization method for fitting of torsional
parameters.
dihedral_text_file : str, optional
Dihedral information file for torsiondrive.
system_init_sdf : str, optional
Ligand SDF (structure-data) format file. This file will be generated
only if the ligand is charged.
reparameterised_system_xml_file : str, optional
Reparameterized force field XML file obtained using
        openforcefield without torsional reparameterization.
reparameterised_torsional_system_xml_file : str, optional
XML force field file for the ligand obtained with
        torsional reparameterization.
"""
def __init__(
self,
# TODO: some of these variables are ints, and should be initialized as ints
num_charge_atoms="",
index_charge_atom_1="",
charge_atom_1="",
tor_dir="torsion_dir",
reparameterized_torsional_params_file="reparameterized_torsional_params.txt",
psi_input_file="torsion_drive_input.dat",
xyz_file="torsion_drive_input.xyz",
coords_file="torsion_drive_input.txt",
template_pdb="guest_init_II.pdb",
system_pdb="torsion_drive_input.pdb",
system_sdf="torsion_drive_input.sdf",
system_xml="torsion_drive_input.xml",
qm_scan_file="scan.xyz",
load_topology="openmm",
method="L-BFGS-B",
dihedral_text_file="dihedrals.txt",
system_init_sdf="torsion_drive_input_init.sdf",
reparameterised_system_xml_file="guest_reparameterised.xml",
reparameterised_torsional_system_xml_file="guest_torsional_reparameterized.xml",
):
self.num_charge_atoms = num_charge_atoms
self.index_charge_atom_1 = index_charge_atom_1
self.charge_atom_1 = charge_atom_1
self.tor_dir = tor_dir
self.reparameterized_torsional_params_file = (
reparameterized_torsional_params_file
)
self.psi_input_file = psi_input_file
self.xyz_file = xyz_file
self.coords_file = coords_file
self.template_pdb = template_pdb
self.system_pdb = system_pdb
self.system_sdf = system_sdf
self.system_xml = system_xml
self.qm_scan_file = qm_scan_file
self.method = method
self.dihedral_text_file = dihedral_text_file
self.system_init_sdf = system_init_sdf
self.load_topology = load_topology
self.reparameterised_system_xml_file = reparameterised_system_xml_file
self.reparameterised_torsional_system_xml_file = (
reparameterised_torsional_system_xml_file
)
def write_reparams_torsion_lines(self):
"""
Saves a text file containing torsional parameters for the ligand
obtained through openforcefield.
"""
torsional_parameters_list = []
parent_cwd = os.getcwd()
# TODO: use os.path.join
target_dir = os.path.join(parent_cwd, self.tor_dir)
# TODO: let's use a more informative variable name than 'i'
for i in os.listdir(target_dir):
os.chdir(os.path.join(parent_cwd, self.tor_dir, i))
if os.path.isfile(self.qm_scan_file):
print("Entering directory" + " : " + os.getcwd())
torsiondrive_input_to_xyz(
psi_input_file=self.psi_input_file, xyz_file=self.xyz_file,
)
xyz_to_pdb(
xyz_file=self.xyz_file,
coords_file=self.coords_file,
template_pdb=self.template_pdb,
system_pdb=self.system_pdb,
)
generate_xml_from_charged_pdb_sdf(
system_pdb=self.system_pdb,
system_init_sdf=self.system_init_sdf,
system_sdf=self.system_sdf,
num_charge_atoms=self.num_charge_atoms,
index_charge_atom_1=self.index_charge_atom_1,
charge_atom_1=self.charge_atom_1,
system_xml=self.system_xml,
)
torsional_lines = get_torsional_lines(
template_pdb=self.template_pdb,
system_xml=self.system_xml,
qm_scan_file=self.qm_scan_file,
load_topology=self.load_topology,
method=self.method,
dihedral_text_file=self.dihedral_text_file,
)
# print(torsional_lines)
torsional_parameters_list.append(torsional_lines)
remove_mm_files(qm_scan_file=self.qm_scan_file)
os.chdir(parent_cwd)
else:
print("Entering directory" + " : " + os.getcwd())
print(
"Torsional Scan file not found, optimization may not \
be complete. Existing!!"
)
os.chdir(parent_cwd)
torsional_parameters = [
item for sublist in torsional_parameters_list for item in sublist
]
with open(self.reparameterized_torsional_params_file, "w") as f:
for i in torsional_parameters:
f.write(i + "\n")
def write_reparams_torsion_lines_charged(self):
"""
Saves a text file containing torsional parameters for a charged ligand
obtained through openforcefield.
"""
torsional_parameters_list = []
parent_cwd = os.getcwd()
target_dir = os.path.join(parent_cwd, self.tor_dir)
for i in os.listdir(target_dir):
os.chdir(os.path.join(parent_cwd, self.tor_dir, i))
if os.path.isfile(self.qm_scan_file):
print("Entering directory" + " : " + os.getcwd())
torsiondrive_input_to_xyz(
psi_input_file=self.psi_input_file, xyz_file=self.xyz_file,
)
xyz_to_pdb(
xyz_file=self.xyz_file,
coords_file=self.coords_file,
template_pdb=self.template_pdb,
system_pdb=self.system_pdb,
)
generate_xml_from_charged_pdb_sdf(
system_pdb=self.system_pdb,
system_init_sdf=self.system_init_sdf,
system_sdf=self.system_sdf,
num_charge_atoms=self.num_charge_atoms,
index_charge_atom_1=self.index_charge_atom_1,
charge_atom_1=self.charge_atom_1,
system_xml=self.system_xml,
)
torsional_lines = get_torsional_lines(
template_pdb=self.template_pdb,
system_xml=self.system_xml,
qm_scan_file=self.qm_scan_file,
load_topology=self.load_topology,
method=self.method,
dihedral_text_file=self.dihedral_text_file,
)
# print(torsional_lines)
torsional_parameters_list.append(torsional_lines)
remove_mm_files(qm_scan_file=self.qm_scan_file)
os.chdir(parent_cwd)
else:
print("Entering directory" + " : " + os.getcwd())
print(
"Torsional Scan file not found, optimization may not \
be complete. Existing!!"
)
os.chdir(parent_cwd)
torsional_parameters = [
item for sublist in torsional_parameters_list for item in sublist
]
with open(self.reparameterized_torsional_params_file, "w") as f:
for i in torsional_parameters:
f.write(i + "\n")
def write_torsional_reparams(self):
"""
Generates a XML force field file for the ligand with reparameterized
torsional parameters.
"""
with open(self.reparameterized_torsional_params_file, "r") as xml_tor:
xml_tor_lines = xml_tor.readlines()
non_zero_k_tor = []
for i in xml_tor_lines:
to_find = "k=" + '"' + "0.0" + '"'
if to_find not in i:
non_zero_k_tor.append(i)
# print(non_zero_k_tor)
p1 = []
for i in range(len(non_zero_k_tor)):
p1.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[2]))
# print(p1)
p2 = []
for i in range(len(non_zero_k_tor)):
p2.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[4]))
# print(p2)
p3 = []
for i in range(len(non_zero_k_tor)):
p3.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[6]))
# print(p3)
p4 = []
for i in range(len(non_zero_k_tor)):
p4.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[8]))
# print(p4)
periodicity = []
for i in range(len(non_zero_k_tor)):
periodicity.append(
int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[9])
)
# print(periodicity)
# TODO: there may be a way to consolidate the reparametrization of
# the XML file to obey the DRY principle
xml_tor_reparams = open(self.reparameterised_system_xml_file, "r")
xml_tor_reparams_lines = xml_tor_reparams.readlines()
# A string template and formatting should be used here
for j in range(len(xml_tor_reparams_lines)):
for i in range(len(non_zero_k_tor)):
to_find_tor = (
"p1="
+ '"'
+ str(p1[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(p2[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(p3[i])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(p4[i])
+ '"'
+ " "
+ "periodicity="
+ '"'
+ str(periodicity[i])
+ '"'
)
if to_find_tor in xml_tor_reparams_lines[j]:
# print(xml_tor_reparams_lines[j])
xml_tor_reparams_lines[j] = non_zero_k_tor[i]
with open(self.reparameterised_torsional_system_xml_file, "w") as f:
for i in xml_tor_reparams_lines:
f.write(i)
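    # NOTE: write_torsional_reparams() locates torsion lines in the
    # reparameterised XML by matching a substring of the form
    # (illustrative indices)
    #
    #     p1="0" p2="1" p3="4" p4="6" periodicity="3"
    #
    # and replaces each matching line with the corresponding fitted
    # (non-zero k) line from reparameterized_torsional_params_file.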
class PrepareSolvatedParams:
"""
    A class used to integrate the parameterized topology
    files of the receptor-ligand complex and the solvent.
    This class contains methods to concatenate the solvent (and
    ions) and the receptor-ligand complex into a single pair of
    parameterized topology and coordinate files (prmtop and inpcrd).
...
Attributes
----------
init_pdb : str
Initial PDB file containing the receptor-ligand complex with
solvent, ions, etc.
intermediate_pdb : str, optional
An intermediate PDB file formed during pdb4amber processing.
solvent_pdb : str, optional
PDB file containing the water, ions, etc.
solvent_prmtop : str, optional
Solvent topology file.
solvent_inpcrd : str, optional
Solvent coordinate file.
solvent_amber_pdb : str, optional
Solvent PDB file saved from Amber's tleap.
solvent_leap : str, optional
Solvent tleap file for parameterizing the solvent.
system_prmtop : str, optional
Topology file of the receptor - ligand complex.
system_inpcrd : str, optional
Coordinate file of the receptor - ligand complex.
system_output: str, optional
PDB file containing the trajectory coordinates for
the OpenMM simulation.
    sim_steps: int, optional
Number of steps in the OpenMM MD simulation.
system_solvent_prmtop : str, optional
Topology file of the receptor - ligand complex and
the solvent.
system_solvent_inpcrd : str, optional
Coordinate file of the receptor - ligand complex and
the solvent.
system_solvent_pdb : str, optional
PDB file of the receptor - ligand complex and
the solvent.
"""
def __init__(
self,
init_pdb,
intermediate_pdb="intermediate.pdb",
solvent_pdb="solvent.pdb",
solvent_prmtop="solvent.prmtop",
solvent_inpcrd="solvent.inpcrd",
solvent_amber_pdb="solvent_amber.pdb",
solvent_leap="solvent.leap",
system_prmtop="system_torsional_params.prmtop",
system_inpcrd="system_torsional_params.inpcrd",
system_output="sim_output.pdb",
sim_steps=1000,
system_solvent_prmtop="system_qmmmrebind.prmtop",
system_solvent_inpcrd="system_qmmmrebind.inpcrd",
system_solvent_pdb="system_qmmmrebind.pdb",
):
self.init_pdb = init_pdb
self.intermediate_pdb = intermediate_pdb
self.solvent_pdb = solvent_pdb
self.solvent_prmtop = solvent_prmtop
self.solvent_inpcrd = solvent_inpcrd
self.solvent_amber_pdb = solvent_amber_pdb
self.solvent_leap = solvent_leap
self.system_prmtop = system_prmtop
self.system_inpcrd = system_inpcrd
self.system_output = system_output
self.sim_steps = sim_steps
self.system_solvent_prmtop = system_solvent_prmtop
self.system_solvent_inpcrd = system_solvent_inpcrd
self.system_solvent_pdb = system_solvent_pdb
def create_solvent_pdb(self):
"""
Generates a PDB file containing the solvent and the ions.
"""
water_variables = ["HOH", "WAT"]
ions = [
"Na+",
"Cs+",
"K+",
"Li+",
"Rb+",
"Cl-",
"Br-",
"F-",
"I-",
"Ca2",
]
pdb_variables = ["END", "CRYST"]
with open(self.init_pdb) as f1, open(self.intermediate_pdb, "w") as f2:
for line in f1:
if (
any(
water_variable in line
for water_variable in water_variables
)
or any(
pdb_variable in line for pdb_variable in pdb_variables
)
or any(ion in line for ion in ions)
):
f2.write(line)
command = (
"pdb4amber -i " + self.intermediate_pdb + " -o " + self.solvent_pdb
)
os.system(command)
command = (
"rm -rf "
+ self.solvent_pdb[:-4]
+ "_nonprot.pdb "
+ self.solvent_pdb[:-4]
+ "_renum.txt "
+ self.solvent_pdb[:-4]
+ "_sslink"
)
os.system(command)
command = "rm -rf " + self.intermediate_pdb
os.system(command)
def parameterize_solvent_pdb(self):
"""
Generates a topology file (prmtop) and a coordinate
file (inpcrd) for the solvent system.
"""
line_0 = " "
line_1 = "source leaprc.protein.ff14SB"
line_2 = "source leaprc.water.tip3p"
line_3 = "loadAmberParams frcmod.ionsjc_tip3p"
line_4 = "pdb = loadpdb " + self.solvent_pdb
line_5 = (
"saveamberparm pdb "
+ self.solvent_prmtop
+ " "
+ self.solvent_inpcrd
)
line_6 = "savepdb pdb " + self.solvent_amber_pdb
line_7 = "quit"
with open(self.solvent_leap, "w") as f:
f.write(line_0 + "\n")
f.write(line_1 + "\n")
f.write(line_2 + "\n")
f.write(line_3 + "\n")
f.write(line_4 + "\n")
f.write(line_5 + "\n")
f.write(line_6 + "\n")
f.write(line_7 + "\n")
command = "tleap -f " + self.solvent_leap
os.system(command)
command = "rm -rf leap.log " + self.solvent_leap
os.system(command)
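    # NOTE: with the defaults, parameterize_solvent_pdb() writes and runs a
    # tleap script of the form:
    #
    #     source leaprc.protein.ff14SB
    #     source leaprc.water.tip3p
    #     loadAmberParams frcmod.ionsjc_tip3p
    #     pdb = loadpdb solvent.pdb
    #     saveamberparm pdb solvent.prmtop solvent.inpcrd
    #     savepdb pdb solvent_amber.pdb
    #     quit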
def run_openmm_solvent_prmtop_inpcrd(self):
"""
Runs OpenMM MD simulation with prmtop and inpcrd file
for the solvent.
"""
print(
"Running OpenMM simulation for "
+ self.solvent_prmtop
+ " and "
+ self.solvent_inpcrd
)
prmtop = simtk.openmm.app.AmberPrmtopFile(self.solvent_prmtop)
inpcrd = simtk.openmm.app.AmberInpcrdFile(self.solvent_inpcrd)
system = prmtop.createSystem()
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
simulation.minimizeEnergy(maxIterations=100000)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(
self.system_output, self.sim_steps / 10
)
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
reportInterval=int(self.sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(self.sim_steps)
command = "rm -rf " + self.system_output
os.system(command)
def run_openmm_solvent_prmtop_pdb(self):
"""
Runs OpenMM MD simulation with prmtop and PDB file
for the solvent.
"""
print(
"Running OpenMM simulation for "
+ self.solvent_prmtop
+ " and "
+ self.solvent_amber_pdb
)
pdb = simtk.openmm.app.PDBFile(self.solvent_amber_pdb)
prmtop = simtk.openmm.app.AmberPrmtopFile(self.solvent_prmtop)
system = prmtop.createSystem()
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
simulation.minimizeEnergy(maxIterations=100000)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(
self.system_output, self.sim_steps / 10
)
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
reportInterval=int(self.sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(self.sim_steps)
command = "rm -rf " + self.system_output
os.system(command)
def merge_topology_files_system_solvent(self):
"""
Merge the system and solvent topology and coordinate
files.
"""
print(
"Merging the "
+ self.system_prmtop
+ " "
+ self.solvent_prmtop
+ " files"
)
print(
"Merging the "
+ self.system_inpcrd
+ " "
+ self.solvent_inpcrd
+ " files"
)
system = parmed.load_file(self.system_prmtop, xyz=self.system_inpcrd)
solvent = parmed.load_file(
self.solvent_prmtop, xyz=self.solvent_inpcrd
)
system_solvent = system + solvent
system_solvent.save(self.system_solvent_prmtop, overwrite=True)
system_solvent.save(self.system_solvent_inpcrd, overwrite=True)
system_solvent.save(self.system_solvent_pdb, overwrite=True)
def run_openmm_system_solvent_prmtop_inpcrd(self):
"""
Runs OpenMM MD simulation with prmtop and inpcrd file
for the solvent - system complex.
"""
print(
"Running OpenMM simulation for "
+ self.system_solvent_prmtop
+ " and "
+ self.system_solvent_inpcrd
)
prmtop = simtk.openmm.app.AmberPrmtopFile(self.system_solvent_prmtop)
inpcrd = simtk.openmm.app.AmberInpcrdFile(self.system_solvent_inpcrd)
system = prmtop.createSystem()
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
simulation.minimizeEnergy(maxIterations=100000)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(
self.system_output, self.sim_steps / 10
)
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
reportInterval=int(self.sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(self.sim_steps)
command = "rm -rf " + self.system_output
os.system(command)
def run_openmm_system_solvent_prmtop_pdb(self):
"""
Runs OpenMM MD simulation with prmtop and PDB file
for the solvent - system complex.
"""
print(
"Running OpenMM simulation for "
+ self.system_solvent_prmtop
+ " and "
+ self.system_solvent_pdb
)
pdb = simtk.openmm.app.PDBFile(self.system_solvent_pdb)
prmtop = simtk.openmm.app.AmberPrmtopFile(self.system_solvent_prmtop)
system = prmtop.createSystem()
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
simulation.minimizeEnergy(maxIterations=100000)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(
self.system_output, self.sim_steps / 10
)
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
reportInterval=int(self.sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(self.sim_steps)
command = "rm -rf " + self.system_output
os.system(command)
class SystemAmberSystem:
"""
A class used to generate a force field XML file for the system
from the given amber forcefield topology files and
regenerate the reparameterised forcefield XML file.
This class contain methods to generate a XML force field through
parmed if the amber forcefield topology files are given.
Re-parameterized XML force field files are then generated from
these XML focefield files. Different energy components such as
bond, angle, torsional and non-bonded energies are computed for the
non-reparametrized and the reparameterized force fields. Difference
between the non-reparameterized and reparameterized force field energies
can then be analyzed.
...
Attributes
----------
host_pdb: str, optional
PDB file for the host.
system_pdb: str, optional
PDB file for the system (host, guest and solvent).
prmtop_system: str, optional
Topology file for the system (host, guest and solvent).
system_xml: str, optional
Serialised XML forcefield file generated by parmed.
charge_parameter_file_guest: str, optional
        File containing the charges of ligand atoms and their
        corresponding atoms.
guest_qm_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
bond_parameter_file_guest: str, optional
Text file containing the bond parameters for the ligand.
angle_parameter_file_guest: str, optional
Text file containing the angle parameters of the ligand.
guest_qm_params_file: str, optional
Text file containing QM obtained parameters for the ligand.
charge_parameter_file_host: str, optional
File containing the charges of receptor atoms and their
corresponding atoms.
bond_parameter_file_host: str, optional
Text file containing the bond parameters for the receptor.
host_qm_pdb: str, optional
Receptor QM region's PDB file with atom numbers beginning from 1.
angle_parameter_file_host: str, optional
Text file containing the angle parameters of the receptor.
host_qm_params_file: str, optional
Text file containing QM obtained parameters for the receptor.
host_guest_qm_params_file: str, optional
Text file containing QM obtained parameters for the system.
reparameterised_intermediate_system_xml_file: str, optional
XML force field file with bond and angle parameter lines replaced by
corresponding values obtained from the QM calculations.
system_xml_non_bonded_file: str, optional
Text file to write the NonBondedForce Charge Parameters from
the non-parameterised system XML file.
system_xml_non_bonded_reparams_file: str, optional
Text file containing the non-bonded parameters parsed from the
XML force field file.
reparameterised_system_xml_file: str, optional
Reparameterized force field XML file obtained using
openforcefield.
reparameterized_torsional_params_file : str, optional
Text file containing the forcefield parameters for the
ligand previously obtained without torsional reparameterization.
reparameterised_intermediate_torsional_system_xml_file : str, optional
XML force field file for the system (without the QM charges) obtained
        with torsional reparameterization.
reparameterised_torsional_system_xml_file : str, optional
XML force field file for the system obtained with
        torsional reparameterization.
load_topology: str, optional
Argument to specify how to load the topology. Can either be "openmm"
or "parmed".
non_reparameterised_system_xml_file: str, optional
Non-reparameterized force field XML file.
prmtop_system_non_params: str, optional
Non-reparameterized topology file.
inpcrd_system_non_params: str, optional
Non-reparameterized INPCRD file.
prmtop_system_intermediate_params: str, optional
Reparameterized topology file but without the QM charges.
inpcrd_system_intermediate_params: str, optional
Reparameterized INPCRD file but without the QM charges.
prmtop_system_params: str, optional
Reparameterized topology file.
inpcrd_system_params: str, optional
Reparameterized INPCRD file.
"""
def __init__(
self,
host_pdb="host.pdb",
system_pdb="",
prmtop_system="hostguest.parm7",
system_xml="hostguest.xml",
charge_parameter_file_guest="guest_qm_surround_charges.txt",
guest_qm_pdb="guest_init_II.pdb",
bond_parameter_file_guest="guest_bonds.txt",
angle_parameter_file_guest="guest_angles.txt",
guest_qm_params_file="guest_qm_params.txt",
charge_parameter_file_host="host_qm_surround_charges.txt",
bond_parameter_file_host="host_qm_bonds.txt",
host_qm_pdb="host_qm.pdb",
angle_parameter_file_host="host_qm_angles.txt",
host_qm_params_file="host_qm_params.txt",
host_guest_qm_params_file="host_guest_qm_params.txt",
reparameterised_intermediate_system_xml_file="hostguest_intermediate.xml",
system_xml_non_bonded_file="hostguest_non_bonded.txt",
system_xml_non_bonded_reparams_file="hostguest_non_bonded_reparams.txt",
reparameterised_system_xml_file="hostguest_reparameterised.xml",
reparameterized_torsional_params_file="reparameterized_torsional_params.txt",
reparameterised_intermediate_torsional_system_xml_file="reparameterized_torsional_params.txt",
reparameterised_torsional_system_xml_file="hostguest_torsional_reparameterised.xml",
load_topology="openmm",
non_reparameterised_system_xml_file="hostguest.xml",
prmtop_system_non_params="hostguest.parm7",
inpcrd_system_non_params="hostguest_non_params.pdb",
prmtop_system_intermediate_params="hostguest_intermediate.prmtop",
inpcrd_system_intermediate_params="hostguest_intermediate.inpcrd",
prmtop_system_params="hostguest_params.prmtop",
inpcrd_system_params="hostguest_params.inpcrd",
):
self.host_pdb = host_pdb
self.system_pdb = system_pdb
self.prmtop_system = prmtop_system
self.system_xml = system_xml
self.charge_parameter_file_guest = charge_parameter_file_guest
self.guest_qm_pdb = guest_qm_pdb
self.bond_parameter_file_guest = bond_parameter_file_guest
self.angle_parameter_file_guest = angle_parameter_file_guest
self.guest_qm_params_file = guest_qm_params_file
self.charge_parameter_file_host = charge_parameter_file_host
self.bond_parameter_file_host = bond_parameter_file_host
self.host_qm_pdb = host_qm_pdb
self.angle_parameter_file_host = angle_parameter_file_host
self.host_qm_params_file = host_qm_params_file
self.host_guest_qm_params_file = host_guest_qm_params_file
self.reparameterised_intermediate_system_xml_file = (
reparameterised_intermediate_system_xml_file
)
self.system_xml_non_bonded_file = system_xml_non_bonded_file
self.system_xml_non_bonded_reparams_file = (
system_xml_non_bonded_reparams_file
)
self.reparameterised_system_xml_file = reparameterised_system_xml_file
self.reparameterized_torsional_params_file = (
reparameterized_torsional_params_file
)
self.reparameterised_intermediate_torsional_system_xml_file = (
reparameterised_intermediate_torsional_system_xml_file
)
self.reparameterised_torsional_system_xml_file = (
reparameterised_torsional_system_xml_file
)
self.load_topology = load_topology
self.non_reparameterised_system_xml_file = (
non_reparameterised_system_xml_file
)
self.prmtop_system_non_params = prmtop_system_non_params
self.inpcrd_system_non_params = inpcrd_system_non_params
self.prmtop_system_intermediate_params = (
prmtop_system_intermediate_params
)
self.inpcrd_system_intermediate_params = (
inpcrd_system_intermediate_params
)
self.prmtop_system_params = prmtop_system_params
self.inpcrd_system_params = inpcrd_system_params
def generate_xml_from_prmtop(self):
"""
        Generates a serialised XML forcefield file through parmed, given
the PDB file and its corresponding topology file.
"""
parm = parmed.load_file(self.prmtop_system, self.system_pdb)
system = parm.createSystem()
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def write_guest_params_non_zero(self):
"""
Saves the parameters of the ligand obtained from the QM log files
in a text file starting from non-zero ( indexing begins from the
index of the last atom of the receptor ).
"""
# Charges from QM files
df_charges = pd.read_csv(
self.charge_parameter_file_guest, header=None, delimiter=r"\s+"
)
df_charges.columns = ["atom", "charges"]
qm_charges = df_charges["charges"].values.tolist()
qm_charges = [round(num, 6) for num in qm_charges]
# print(qm_charges)
# Bond Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
# atom_name_list = [i - 1 for i in atom_name_list]
no_host_atoms = get_num_host_atoms(self.host_pdb)
atom_name_list = [i - 1 + no_host_atoms for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.bond_parameter_file_guest, header=None, delimiter=r"\s+"
)
df.columns = ["bond", "k_bond", "bond_length", "bond_1", "bond_2"]
# print(df.head())
bond_1_list = df["bond_1"].values.tolist()
bond_1_list = [x - 1 + min(atom_name_list) for x in bond_1_list]
bond_2_list = df["bond_2"].values.tolist()
bond_2_list = [x - 1 + min(atom_name_list) for x in bond_2_list]
# print(bond_1_list)
# print(bond_2_list)
k_bond_list = df["k_bond"].values.tolist()
k_bond_list = [
i * KCAL_MOL_PER_KJ_MOL * ANGSTROMS_PER_NM**2 for i in k_bond_list
] # kcal/mol * A^2 to kJ/mol * nm^2
k_bond_list = [round(num, 10) for num in k_bond_list]
# print(k_bond_list)
bond_length_list = df["bond_length"].values.tolist()
bond_length_list = [i / 10.00 for i in bond_length_list]
bond_length_list = [round(num, 6) for num in bond_length_list]
# print(bond_length_list)
# Angle Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
# atom_name_list = [i - 1 for i in atom_name_list]
no_host_atoms = get_num_host_atoms(self.host_pdb)
atom_name_list = [i - 1 + no_host_atoms for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.angle_parameter_file_guest, header=None, delimiter=r"\s+"
)
df.columns = [
"angle",
"k_angle",
"angle_degrees",
"angle_1",
"angle_2",
"angle_3",
]
# print(df.head())
angle_1_list = df["angle_1"].values.tolist()
angle_1_list = [x - 1 + min(atom_name_list) for x in angle_1_list]
# print(angle_1_list)
angle_2_list = df["angle_2"].values.tolist()
angle_2_list = [x - 1 + min(atom_name_list) for x in angle_2_list]
# print(angle_2_list)
angle_3_list = df["angle_3"].values.tolist()
angle_3_list = [x - 1 + min(atom_name_list) for x in angle_3_list]
# print(angle_3_list)
k_angle_list = df["k_angle"].values.tolist()
k_angle_list = [
i * 4.184 for i in k_angle_list
] # kcal/mol * radian^2 to kJ/mol * radian^2
k_angle_list = [round(num, 6) for num in k_angle_list]
# print(k_angle_list)
angle_list = df["angle_degrees"].values.tolist()
angle_list = [(i * math.pi) / 180.00 for i in angle_list]
angle_list = [round(num, 6) for num in angle_list]
# print(angle_list)
xml = open(self.guest_qm_params_file, "w")
xml.write("Begin writing the Bond Parameters" + "\n")
for i in range(len(k_bond_list)):
xml.write(
" "
+ "<Bond"
+ " "
+ "d="
+ '"'
+ str(bond_length_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_bond_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(bond_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(bond_2_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Bond Parameters" + "\n")
xml.write("Begin writing the Angle Parameters" + "\n")
for i in range(len(k_angle_list)):
xml.write(
" "
+ "<Angle"
+ " "
+ "a="
+ '"'
+ str(angle_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_angle_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(angle_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(angle_2_list[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(angle_3_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Angle Parameters" + "\n")
xml.write("Begin writing the Charge Parameters" + "\n")
for i in range(len(qm_charges)):
xml.write(
"<Particle"
+ " "
+ "q="
+ '"'
+ str(qm_charges[i])
+ '"'
+ " "
+ "eps="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "sig="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "atom="
+ '"'
+ str(atom_name_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Charge Parameters" + "\n")
xml.close()
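    # NOTE: write_guest_params_non_zero() emits a plain-text parameter file
    # with sections of the form (illustrative values and indices):
    #
    #     Begin writing the Bond Parameters
    #      <Bond d="0.1526" k="259407.9" p1="150" p2="151"/>
    #     Finish writing the Bond Parameters
    #     Begin writing the Angle Parameters
    #      <Angle a="1.911136" k="418.4" p1="150" p2="151" p3="152"/>
    #     Finish writing the Angle Parameters
    #     Begin writing the Charge Parameters
    #     <Particle q="-0.1234" eps="0.0" sig="0.0" atom="150"/>
    #     Finish writing the Charge Parameters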
def write_host_params(self):
"""
Saves the parameters obtained from the QM log files of the
receptor in a text file.
"""
# Charges from QM files
df_charges = pd.read_csv(
self.charge_parameter_file_host, header=None, delimiter=r"\s+"
)
df_charges.columns = ["atom", "charges"]
qm_charges = df_charges["charges"].values.tolist()
qm_charges = [round(num, 6) for num in qm_charges]
# print(qm_charges)
# Bond Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.host_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
atom_name_list = [i - 1 for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.bond_parameter_file_host, header=None, delimiter=r"\s+"
)
df.columns = ["bond", "k_bond", "bond_length", "bond_1", "bond_2"]
# print(df.head())
bond_1_list = df["bond_1"].values.tolist()
bond_1_list = [x - 1 + min(atom_name_list) for x in bond_1_list]
bond_2_list = df["bond_2"].values.tolist()
bond_2_list = [x - 1 + min(atom_name_list) for x in bond_2_list]
# print(bond_1_list)
# print(bond_2_list)
k_bond_list = df["k_bond"].values.tolist()
k_bond_list = [
i * KCAL_MOL_PER_KJ_MOL * ANGSTROMS_PER_NM**2 for i in k_bond_list
] # kcal/mol * A^2 to kJ/mol * nm^2
k_bond_list = [round(num, 10) for num in k_bond_list]
# print(k_bond_list)
bond_length_list = df["bond_length"].values.tolist()
bond_length_list = [i / 10.00 for i in bond_length_list]
bond_length_list = [round(num, 6) for num in bond_length_list]
# print(bond_length_list)
# Angle Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.host_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
atom_name_list = [i - 1 for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.angle_parameter_file_host, header=None, delimiter=r"\s+"
)
df.columns = [
"angle",
"k_angle",
"angle_degrees",
"angle_1",
"angle_2",
"angle_3",
]
# print(df.head())
angle_1_list = df["angle_1"].values.tolist()
angle_1_list = [x - 1 + min(atom_name_list) for x in angle_1_list]
# print(angle_1_list)
angle_2_list = df["angle_2"].values.tolist()
angle_2_list = [x - 1 + min(atom_name_list) for x in angle_2_list]
# print(angle_2_list)
angle_3_list = df["angle_3"].values.tolist()
angle_3_list = [x - 1 + min(atom_name_list) for x in angle_3_list]
# print(angle_3_list)
k_angle_list = df["k_angle"].values.tolist()
k_angle_list = [
i * 4.184 for i in k_angle_list
] # kcal/mol * radian^2 to kJ/mol * radian^2
k_angle_list = [round(num, 6) for num in k_angle_list]
# print(k_angle_list)
angle_list = df["angle_degrees"].values.tolist()
angle_list = [(i * math.pi) / 180.00 for i in angle_list]
angle_list = [round(num, 6) for num in angle_list]
# print(angle_list)
xml = open(self.host_qm_params_file, "w")
xml.write("Begin writing the Bond Parameters" + "\n")
for i in range(len(k_bond_list)):
xml.write(
" "
+ "<Bond"
+ " "
+ "d="
+ '"'
+ str(bond_length_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_bond_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(bond_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(bond_2_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Bond Parameters" + "\n")
xml.write("Begin writing the Angle Parameters" + "\n")
for i in range(len(k_angle_list)):
xml.write(
" "
+ "<Angle"
+ " "
+ "a="
+ '"'
+ str(angle_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_angle_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(angle_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(angle_2_list[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(angle_3_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Angle Parameters" + "\n")
xml.write("Begin writing the Charge Parameters" + "\n")
for i in range(len(qm_charges)):
xml.write(
"<Particle"
+ " "
+ "q="
+ '"'
+ str(qm_charges[i])
+ '"'
+ " "
+ "eps="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "sig="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "atom="
+ '"'
+ str(atom_name_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Charge Parameters" + "\n")
xml.close()
def merge_qm_params(self):
"""
        Merges the bond, angle and charge parameters of the receptor and
        the ligand obtained from the QM calculations into a single text
        file for the host-guest system.
"""
# Bond Parameters Host
f_params_host = open(self.host_qm_params_file, "r")
lines_params_host = f_params_host.readlines()
# Bond Parameters Host
for i in range(len(lines_params_host)):
if "Begin writing the Bond Parameters" in lines_params_host[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params_host[i]:
to_end = int(i)
bond_params_host = lines_params_host[to_begin + 1 : to_end]
# Bond Parameters Guest
f_params_guest = open(self.guest_qm_params_file, "r")
lines_params_guest = f_params_guest.readlines()
# Bond Parameters Guest
for i in range(len(lines_params_guest)):
if "Begin writing the Bond Parameters" in lines_params_guest[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params_guest[i]:
to_end = int(i)
bond_params_guest = lines_params_guest[to_begin + 1 : to_end]
bond_systems_params = bond_params_host + bond_params_guest
# Angle Parameters Host
f_params_host = open(self.host_qm_params_file, "r")
lines_params_host = f_params_host.readlines()
# Angle Parameters Host
for i in range(len(lines_params_host)):
if "Begin writing the Angle Parameters" in lines_params_host[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params_host[i]:
to_end = int(i)
angle_params_host = lines_params_host[to_begin + 1 : to_end]
# Angle Parameters Guest
f_params_guest = open(self.guest_qm_params_file, "r")
lines_params_guest = f_params_guest.readlines()
# Angle Parameters Guest
for i in range(len(lines_params_guest)):
if "Begin writing the Angle Parameters" in lines_params_guest[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params_guest[i]:
to_end = int(i)
angle_params_guest = lines_params_guest[to_begin + 1 : to_end]
angle_systems_params = angle_params_host + angle_params_guest
# Charge Parameters Host
f_params_host = open(self.host_qm_params_file, "r")
lines_params_host = f_params_host.readlines()
# Charge Parameters Host
for i in range(len(lines_params_host)):
if "Begin writing the Charge Parameters" in lines_params_host[i]:
to_begin = int(i)
if "Finish writing the Charge Parameters" in lines_params_host[i]:
to_end = int(i)
charge_params_host = lines_params_host[to_begin + 1 : to_end]
# Charge Parameters Guest
f_params_guest = open(self.guest_qm_params_file, "r")
lines_params_guest = f_params_guest.readlines()
# Charge Parameters Guest
for i in range(len(lines_params_guest)):
if "Begin writing the Charge Parameters" in lines_params_guest[i]:
to_begin = int(i)
if "Finish writing the Charge Parameters" in lines_params_guest[i]:
to_end = int(i)
charge_params_guest = lines_params_guest[to_begin + 1 : to_end]
charge_systems_params = charge_params_host + charge_params_guest
system_params = open(self.host_guest_qm_params_file, "w")
system_params.write("Begin writing the Bond Parameters" + "\n")
for i in range(len(bond_systems_params)):
system_params.write(bond_systems_params[i])
system_params.write("Finish writing the Bond Parameters" + "\n")
system_params.write("Begin writing the Angle Parameters" + "\n")
for i in range(len(angle_systems_params)):
system_params.write(angle_systems_params[i])
system_params.write("Finish writing the Angle Parameters" + "\n")
system_params.write("Begin writing the Charge Parameters" + "\n")
for i in range(len(charge_systems_params)):
system_params.write(charge_systems_params[i])
system_params.write("Finish writing the Charge Parameters" + "\n")
system_params.close()
def write_intermediate_reparameterised_system_xml(self):
"""
Writes a reparameterised XML force field file for the
system but without the QM obtained charges.
"""
# Bond Parameters
f_params = open(self.host_guest_qm_params_file, "r")
lines_params = f_params.readlines()
# Bond Parameters
for i in range(len(lines_params)):
if "Begin writing the Bond Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params[i]:
to_end = int(i)
bond_params = lines_params[to_begin + 1 : to_end]
index_search_replace_bond = []
for i in bond_params:
bond_line_to_replace = i
# print(bond_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_bond = [comb_1, comb_2]
# print(comb_list_bond)
list_search_bond = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
]
# print(list_search_bond)
for j in range(len(list_search_bond)):
if list_search_bond[j] != []:
to_add = (list_search_bond[j], i)
# print(to_add)
index_search_replace_bond.append(to_add)
# Angle Parameters
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
index_search_replace_angle = []
for i in angle_params:
angle_line_to_replace = i
# print(angle_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
re.findall("\d*\.?\d+", i)[7],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_3 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_4 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_5 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_6 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_angle = [comb_1, comb_2, comb_3, comb_4, comb_5, comb_6]
# print(comb_list_angle)
list_search_angle = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
search_in_file(file=self.system_xml, word=comb_3),
search_in_file(file=self.system_xml, word=comb_4),
search_in_file(file=self.system_xml, word=comb_5),
search_in_file(file=self.system_xml, word=comb_6),
]
# print(list_search_angle)
for j in range(len(list_search_angle)):
if list_search_angle[j] != []:
to_add = (list_search_angle[j], i)
# print(to_add)
index_search_replace_angle.append(to_add)
f_org = open(self.system_xml)
lines = f_org.readlines()
for i in range(len(index_search_replace_bond)):
line_number = index_search_replace_bond[i][0][0][0] - 1
line_to_replace = index_search_replace_bond[i][0][0][1]
line_to_replace_with = index_search_replace_bond[i][1]
lines[line_number] = line_to_replace_with
for i in range(len(index_search_replace_angle)):
line_number = index_search_replace_angle[i][0][0][0] - 1
line_to_replace = index_search_replace_angle[i][0][0][1]
line_to_replace_with = index_search_replace_angle[i][1]
lines[line_number] = line_to_replace_with
f_cop = open(self.reparameterised_intermediate_system_xml_file, "w")
for i in lines:
f_cop.write(i)
f_cop.close()
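    # The replacement above relies on the helper search_in_file() (defined or
    # imported earlier in this file).  It is assumed to return a list of
    # (line_number, line_text) tuples for every line of `file` containing
    # `word`, e.g. (a sketch of the assumed behaviour, not the actual
    # implementation):
    #
    #     def search_in_file(file, word):
    #         hits = []
    #         with open(file) as f:
    #             for number, line in enumerate(f, 1):
    #                 if word in line:
    #                     hits.append((number, line))
    #         return hits
    #
    # which is why index_search_replace_bond[i][0][0][0] - 1 recovers the
    # zero-based line index in system_xml and index_search_replace_bond[i][1]
    # the QM parameter line that replaces it.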
def write_reparameterised_system_xml(self):
"""
Writes a reparameterised XML force field file for the system.
"""
# Bond Parameters
f_params = open(self.host_guest_qm_params_file, "r")
lines_params = f_params.readlines()
# Bond Parameters
for i in range(len(lines_params)):
if "Begin writing the Bond Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params[i]:
to_end = int(i)
bond_params = lines_params[to_begin + 1 : to_end]
index_search_replace_bond = []
for i in bond_params:
bond_line_to_replace = i
# print(bond_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_bond = [comb_1, comb_2]
# print(comb_list_bond)
list_search_bond = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
]
# print(list_search_bond)
for j in range(len(list_search_bond)):
if list_search_bond[j] != []:
to_add = (list_search_bond[j], i)
# print(to_add)
index_search_replace_bond.append(to_add)
# Angle Parameters
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
index_search_replace_angle = []
for i in angle_params:
angle_line_to_replace = i
# print(angle_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
re.findall("\d*\.?\d+", i)[7],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_3 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_4 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_5 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_6 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_angle = [comb_1, comb_2, comb_3, comb_4, comb_5, comb_6]
# print(comb_list_angle)
list_search_angle = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
search_in_file(file=self.system_xml, word=comb_3),
search_in_file(file=self.system_xml, word=comb_4),
search_in_file(file=self.system_xml, word=comb_5),
search_in_file(file=self.system_xml, word=comb_6),
]
# print(list_search_angle)
for j in range(len(list_search_angle)):
if list_search_angle[j] != []:
to_add = (list_search_angle[j], i)
# print(to_add)
index_search_replace_angle.append(to_add)
f_org = open(self.system_xml)
lines = f_org.readlines()
for i in range(len(index_search_replace_bond)):
line_number = index_search_replace_bond[i][0][0][0] - 1
line_to_replace = index_search_replace_bond[i][0][0][1]
line_to_replace_with = index_search_replace_bond[i][1]
lines[line_number] = line_to_replace_with
for i in range(len(index_search_replace_angle)):
line_number = index_search_replace_angle[i][0][0][0] - 1
line_to_replace = index_search_replace_angle[i][0][0][1]
line_to_replace_with = index_search_replace_angle[i][1]
lines[line_number] = line_to_replace_with
f_cop = open(self.reparameterised_intermediate_system_xml_file, "w")
for i in lines:
f_cop.write(i)
f_cop.close()
f_params = open(self.host_guest_qm_params_file)
lines_params = f_params.readlines()
# Charge Parameters
for i in range(len(lines_params)):
if "Begin writing the Charge Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Charge Parameters" in lines_params[i]:
to_end = int(i)
charge_params = lines_params[to_begin + 1 : to_end]
non_bonded_index = []
for k in charge_params:
non_bonded_index.append(int(re.findall("[-+]?\d*\.\d+|\d+", k)[3]))
charge_for_index = []
for k in charge_params:
charge_for_index.append(
float(re.findall("[-+]?\d*\.\d+|\d+", k)[0])
)
xml_off = open(self.system_xml)
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
nonbond_params = xml_off_lines[to_begin + 4 : to_end - 1]
# print(len(nonbond_params))
        f_non_bonded = open(self.system_xml_non_bonded_file, "w")
        for x in nonbond_params:
            f_non_bonded.write(x)
        # Close the write handle so the buffer is flushed before re-reading.
        f_non_bonded.close()
        f_non_bonded = open(self.system_xml_non_bonded_file)
lines_non_bonded = f_non_bonded.readlines()
# print(len(lines_non_bonded))
lines_non_bonded_to_write = []
for i in range(len(non_bonded_index)):
line_ = lines_non_bonded[non_bonded_index[i]]
# print(line_)
eps = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[0])
sig = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[2])
line_to_replace = (
" "
+ "<Particle "
+ "eps="
+ '"'
+ str(eps)
+ '"'
+ " "
+ "q="
+ '"'
+ str(charge_for_index[i])
+ '"'
+ " "
+ "sig="
+ '"'
+ str(sig)
+ '"'
+ "/>"
)
lines_non_bonded_to_write.append(line_to_replace)
data_ = list(zip(non_bonded_index, lines_non_bonded_to_write))
df_non_bonded_params = pd.DataFrame(
data_, columns=["line_index", "line"]
)
# print(df_non_bonded_params.head())
f_non_bonded_ = open(self.system_xml_non_bonded_file)
lines_non_bonded_ = f_non_bonded_.readlines()
for i in range(len(lines_non_bonded_)):
if i in non_bonded_index:
lines_non_bonded_[i] = (
df_non_bonded_params.loc[
df_non_bonded_params.line_index == i, "line"
].values[0]
) + "\n"
# print(len(lines_non_bonded_))
f_write_non_bonded_reparams = open(
self.system_xml_non_bonded_reparams_file, "w"
)
for p in range(len(lines_non_bonded_)):
f_write_non_bonded_reparams.write(lines_non_bonded_[p])
f_write_non_bonded_reparams.close()
f_ = open(self.system_xml_non_bonded_reparams_file)
lines_ = f_.readlines()
        print(len(lines_) == len(lines_non_bonded))  # sanity check: should print True
xml_off = open(self.reparameterised_intermediate_system_xml_file)
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
lines_before_params = xml_off_lines[: to_begin + 4]
f__ = open(self.system_xml_non_bonded_reparams_file)
lines_params_non_bonded = f__.readlines()
lines_after_params = xml_off_lines[to_end - 1 :]
f_reparams_xml = open(self.reparameterised_system_xml_file, "w")
for x in lines_before_params:
f_reparams_xml.write(x)
for x in lines_params_non_bonded:
f_reparams_xml.write(x)
for x in lines_after_params:
f_reparams_xml.write(x)
f_reparams_xml.close()
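    # The block above swaps the QM charges into the NonbondedForce section:
    # the <Particle .../> lines between "<GlobalParameters/>" and
    # "<Exceptions>" in the serialized system XML are rewritten with q taken
    # from the QM charge table (eps and sig are kept from the original line),
    # and the file is then reassembled as header + reparameterised particles
    # + tail.  The "+ 4" / "- 1" offsets assume the fixed layout that
    # OpenMM's XmlSerializer produces for the NonbondedForce block.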
def write_torsional_reparams_intermediate(self):
"""
        Generates an XML force field file for the system (without the
        QM charges) with reparameterized torsional parameters of the ligand.
"""
no_host_atoms = get_num_host_atoms(self.host_pdb)
xml_tor = open(self.reparameterized_torsional_params_file, "r")
xml_tor_lines = xml_tor.readlines()
xml_tor_lines_renum = []
for i in xml_tor_lines:
i = i.replace(
"p1=" + '"' + str(int(re.findall("\d*\.?\d+", i)[2])) + '"',
"p1="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[2]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p2=" + '"' + str(int(re.findall("\d*\.?\d+", i)[4])) + '"',
"p2="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[4]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p3=" + '"' + str(int(re.findall("\d*\.?\d+", i)[6])) + '"',
"p3="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[6]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p4=" + '"' + str(int(re.findall("\d*\.?\d+", i)[8])) + '"',
"p4="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[8]) + no_host_atoms))
+ '"',
)
xml_tor_lines_renum.append(i)
non_zero_k_tor = []
for i in xml_tor_lines_renum:
to_find = "k=" + '"' + "0.0" + '"'
if to_find not in i:
non_zero_k_tor.append(i)
# print(non_zero_k_tor)
p1 = []
for i in range(len(non_zero_k_tor)):
p1.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[2]))
# print(p1)
p2 = []
for i in range(len(non_zero_k_tor)):
p2.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[4]))
# print(p2)
p3 = []
for i in range(len(non_zero_k_tor)):
p3.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[6]))
# print(p3)
p4 = []
for i in range(len(non_zero_k_tor)):
p4.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[8]))
# print(p4)
periodicity = []
for i in range(len(non_zero_k_tor)):
periodicity.append(
int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[9])
)
# print(periodicity)
xml_tor_reparams = open(
self.reparameterised_intermediate_system_xml_file, "r"
)
xml_tor_reparams_lines = xml_tor_reparams.readlines()
for j in range(len(xml_tor_reparams_lines)):
for i in range(len(non_zero_k_tor)):
to_find_tor = (
"p1="
+ '"'
+ str(p1[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(p2[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(p3[i])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(p4[i])
+ '"'
+ " "
+ "periodicity="
+ '"'
+ str(periodicity[i])
+ '"'
)
if to_find_tor in xml_tor_reparams_lines[j]:
print(xml_tor_reparams_lines[j])
xml_tor_reparams_lines[j] = non_zero_k_tor[i]
with open(
self.reparameterised_intermediate_torsional_system_xml_file, "w"
) as f:
for i in xml_tor_reparams_lines:
f.write(i)
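    # The torsional lines above are matched by rebuilding the exact
    # p1/p2/p3/p4/periodicity substring with string concatenation.  A more
    # compact equivalent (a sketch only, not used by this code) would be:
    #
    #     to_find_tor = (
    #         'p1="{}" p2="{}" p3="{}" p4="{}" periodicity="{}"'.format(
    #             p1[i], p2[i], p3[i], p4[i], periodicity[i]
    #         )
    #     )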
def write_torsional_reparams(self):
"""
        Generates an XML force field file for the system with reparameterized
torsional parameters of the ligand.
"""
no_host_atoms = get_num_host_atoms(self.host_pdb)
xml_tor = open(self.reparameterized_torsional_params_file, "r")
xml_tor_lines = xml_tor.readlines()
xml_tor_lines_renum = []
for i in xml_tor_lines:
i = i.replace(
"p1=" + '"' + str(int(re.findall("\d*\.?\d+", i)[2])) + '"',
"p1="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[2]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p2=" + '"' + str(int(re.findall("\d*\.?\d+", i)[4])) + '"',
"p2="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[4]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p3=" + '"' + str(int(re.findall("\d*\.?\d+", i)[6])) + '"',
"p3="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[6]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p4=" + '"' + str(int(re.findall("\d*\.?\d+", i)[8])) + '"',
"p4="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[8]) + no_host_atoms))
+ '"',
)
xml_tor_lines_renum.append(i)
non_zero_k_tor = []
for i in xml_tor_lines_renum:
to_find = "k=" + '"' + "0.0" + '"'
if to_find not in i:
non_zero_k_tor.append(i)
# print(non_zero_k_tor)
p1 = []
for i in range(len(non_zero_k_tor)):
p1.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[2]))
# print(p1)
p2 = []
for i in range(len(non_zero_k_tor)):
p2.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[4]))
# print(p2)
p3 = []
for i in range(len(non_zero_k_tor)):
p3.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[6]))
# print(p3)
p4 = []
for i in range(len(non_zero_k_tor)):
p4.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[8]))
# print(p4)
periodicity = []
for i in range(len(non_zero_k_tor)):
periodicity.append(
int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[9])
)
# print(periodicity)
xml_tor_reparams = open(self.reparameterised_system_xml_file, "r")
xml_tor_reparams_lines = xml_tor_reparams.readlines()
for j in range(len(xml_tor_reparams_lines)):
for i in range(len(non_zero_k_tor)):
to_find_tor = (
"p1="
+ '"'
+ str(p1[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(p2[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(p3[i])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(p4[i])
+ '"'
+ " "
+ "periodicity="
+ '"'
+ str(periodicity[i])
+ '"'
)
if to_find_tor in xml_tor_reparams_lines[j]:
print(xml_tor_reparams_lines[j])
xml_tor_reparams_lines[j] = non_zero_k_tor[i]
with open(self.reparameterised_torsional_system_xml_file, "w") as f:
for i in xml_tor_reparams_lines:
f.write(i)
def save_amber_params_non_qm_charges(self):
"""
        Saves Amber-generated topology files for the system
without the QM charges.
"""
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
openmm_system.save(self.prmtop_system_non_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_non_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_non_params, self.inpcrd_system_non_params
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_non_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_non_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(
self.reparameterised_intermediate_torsional_system_xml_file
),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(
self.reparameterised_intermediate_torsional_system_xml_file
),
)
openmm_system.save(
self.prmtop_system_intermediate_params, overwrite=True
)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(
self.inpcrd_system_intermediate_params, overwrite=True
)
parm = parmed.load_file(
self.prmtop_system_intermediate_params,
self.inpcrd_system_intermediate_params,
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(
self.reparameterised_intermediate_torsional_system_xml_file
),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
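    # print(df_compare) above emits a small pandas DataFrame indexed by
    # Energy_term, with one column per source, roughly of the form
    # (the values shown here are illustrative only):
    #
    #                           Energy_xml_params  Energy_prmtop_params
    #     Energy_term
    #     HarmonicBondForce                  12.3                  12.3
    #     HarmonicAngleForce                 45.6                  45.6
    #     PeriodicTorsionForce               78.9                  78.9
    #     NonbondedForce                   -123.4                -123.4
    #
    # Matching columns indicate that the saved prmtop/inpcrd files reproduce
    # the energies of the XML force field they were derived from.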
def save_amber_params(self):
"""
        Saves Amber-generated topology files for the system.
"""
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
openmm_system.save(self.prmtop_system_non_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_non_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_non_params, self.inpcrd_system_non_params
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_non_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_non_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(
self.reparameterised_torsional_system_xml_file
),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(
self.reparameterised_torsional_system_xml_file
),
)
openmm_system.save(self.prmtop_system_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_params, self.inpcrd_system_params
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.reparameterised_torsional_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
class SystemGuestAmberSystem:
"""
A class used to generate a force field XML file for the system
from the given amber forcefield topology files and
regenerate the reparameterised forcefield XML file but without
the host QM parameters.
    This class contains methods to generate an XML force field through
    parmed if the Amber forcefield topology files are given.
    Re-parameterized XML force field files are then generated from
    these XML forcefield files. Different energy components such as
    bond, angle, torsional and non-bonded energies are computed for the
    non-reparameterized and the reparameterized force fields. The
    difference between the non-reparameterized and reparameterized force
    field energies can then be analyzed.
...
Attributes
----------
host_pdb: str, optional
PDB file for the host.
system_pdb: str, optional
PDB file for the system (host, guest and solvent).
prmtop_system: str, optional
Topology file for the system (host, guest and solvent).
system_xml: str, optional
Serialised XML forcefield file generated by parmed.
charge_parameter_file_guest: str, optional
        Text file containing the QM-derived charges of the ligand atoms.
guest_qm_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
bond_parameter_file_guest: str, optional
Text file containing the bond parameters for the ligand.
angle_parameter_file_guest: str, optional
Text file containing the angle parameters of the ligand.
guest_qm_params_file: str, optional
Text file containing QM obtained parameters for the ligand.
reparameterised_intermediate_system_xml_file: str, optional
XML force field file with bond and angle parameter lines replaced by
corresponding values obtained from the QM calculations.
system_xml_non_bonded_file: str, optional
Text file to write the NonBondedForce Charge Parameters from
the non-parameterised system XML file.
system_xml_non_bonded_reparams_file: str, optional
Text file containing the non-bonded parameters parsed from the
XML force field file.
reparameterised_system_xml_file: str, optional
Reparameterized force field XML file obtained using
openforcefield.
reparameterized_torsional_params_file : str, optional
Text file containing the forcefield parameters for the
ligand previously obtained without torsional reparameterization.
reparameterised_intermediate_torsional_system_xml_file : str, optional
XML force field file for the system (without the QM charges) obtained
        with torsional reparameterization.
reparameterised_torsional_system_xml_file : str, optional
XML force field file for the system obtained with
        torsional reparameterization.
load_topology: str, optional
Argument to specify how to load the topology. Can either be "openmm"
or "parmed".
non_reparameterised_system_xml_file: str, optional
Non-reparameterized force field XML file.
prmtop_system_non_params: str, optional
Non-reparameterized topology file.
inpcrd_system_non_params: str, optional
Non-reparameterized INPCRD file.
prmtop_system_intermediate_params: str, optional
Reparameterized topology file but without the QM charges.
inpcrd_system_intermediate_params: str, optional
Reparameterized INPCRD file but without the QM charges.
prmtop_system_params: str, optional
Reparameterized topology file.
inpcrd_system_params: str, optional
Reparameterized INPCRD file.
"""
def __init__(
self,
host_pdb="host.pdb",
system_pdb="",
prmtop_system="hostguest.parm7",
system_xml="hostguest.xml",
charge_parameter_file_guest="guest_qm_surround_charges.txt",
guest_qm_pdb="guest_init_II.pdb",
bond_parameter_file_guest="guest_bonds.txt",
angle_parameter_file_guest="guest_angles.txt",
guest_qm_params_file="guest_qm_params.txt",
reparameterised_intermediate_system_xml_file="hostguest_intermediate.xml",
system_xml_non_bonded_file="hostguest_non_bonded.txt",
system_xml_non_bonded_reparams_file="hostguest_non_bonded_reparams.txt",
reparameterised_system_xml_file="hostguest_reparameterised.xml",
reparameterized_torsional_params_file="reparameterized_torsional_params.txt",
reparameterised_intermediate_torsional_system_xml_file="reparameterized_torsional_params.txt",
reparameterised_torsional_system_xml_file="hostguest_torsional_reparameterised.xml",
load_topology="openmm",
non_reparameterised_system_xml_file="hostguest.xml",
prmtop_system_non_params="hostguest.parm7",
inpcrd_system_non_params="hostguest_non_params.pdb",
prmtop_system_intermediate_params="hostguest_intermediate.prmtop",
inpcrd_system_intermediate_params="hostguest_intermediate.inpcrd",
prmtop_system_params="hostguest_params.prmtop",
inpcrd_system_params="hostguest_params.inpcrd",
):
self.host_pdb = host_pdb
self.system_pdb = system_pdb
self.prmtop_system = prmtop_system
self.system_xml = system_xml
self.charge_parameter_file_guest = charge_parameter_file_guest
self.guest_qm_pdb = guest_qm_pdb
self.bond_parameter_file_guest = bond_parameter_file_guest
self.angle_parameter_file_guest = angle_parameter_file_guest
self.guest_qm_params_file = guest_qm_params_file
self.reparameterised_intermediate_system_xml_file = (
reparameterised_intermediate_system_xml_file
)
self.system_xml_non_bonded_file = system_xml_non_bonded_file
self.system_xml_non_bonded_reparams_file = (
system_xml_non_bonded_reparams_file
)
self.reparameterised_system_xml_file = reparameterised_system_xml_file
self.reparameterized_torsional_params_file = (
reparameterized_torsional_params_file
)
self.reparameterised_intermediate_torsional_system_xml_file = (
reparameterised_intermediate_torsional_system_xml_file
)
self.reparameterised_torsional_system_xml_file = (
reparameterised_torsional_system_xml_file
)
self.load_topology = load_topology
self.non_reparameterised_system_xml_file = (
non_reparameterised_system_xml_file
)
self.prmtop_system_non_params = prmtop_system_non_params
self.inpcrd_system_non_params = inpcrd_system_non_params
self.prmtop_system_intermediate_params = (
prmtop_system_intermediate_params
)
self.inpcrd_system_intermediate_params = (
inpcrd_system_intermediate_params
)
self.prmtop_system_params = prmtop_system_params
self.inpcrd_system_params = inpcrd_system_params
def generate_xml_from_prmtop(self):
"""
        Generates a serialised XML forcefield file through parmed, given
the PDB file and its corresponding topology file.
"""
parm = parmed.load_file(self.prmtop_system, self.system_pdb)
system = parm.createSystem()
with open(self.system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def write_guest_params_non_zero(self):
"""
        Saves the parameters of the ligand obtained from the QM log files
        in a text file, with atom indices offset so that numbering begins
        after the last atom of the receptor.
"""
# Charges from QM files
df_charges = pd.read_csv(
self.charge_parameter_file_guest, header=None, delimiter=r"\s+"
)
df_charges.columns = ["atom", "charges"]
qm_charges = df_charges["charges"].values.tolist()
qm_charges = [round(num, 6) for num in qm_charges]
# print(qm_charges)
# Bond Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
# atom_name_list = [i - 1 for i in atom_name_list]
no_host_atoms = get_num_host_atoms(self.host_pdb)
atom_name_list = [i - 1 + no_host_atoms for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.bond_parameter_file_guest, header=None, delimiter=r"\s+"
)
df.columns = ["bond", "k_bond", "bond_length", "bond_1", "bond_2"]
# print(df.head())
bond_1_list = df["bond_1"].values.tolist()
bond_1_list = [x - 1 + min(atom_name_list) for x in bond_1_list]
bond_2_list = df["bond_2"].values.tolist()
bond_2_list = [x - 1 + min(atom_name_list) for x in bond_2_list]
# print(bond_1_list)
# print(bond_2_list)
k_bond_list = df["k_bond"].values.tolist()
k_bond_list = [
i * KCAL_MOL_PER_KJ_MOL * ANGSTROMS_PER_NM**2 for i in k_bond_list
] # kcal/mol * A^2 to kJ/mol * nm^2
k_bond_list = [round(num, 10) for num in k_bond_list]
# print(k_bond_list)
bond_length_list = df["bond_length"].values.tolist()
bond_length_list = [i / 10.00 for i in bond_length_list]
bond_length_list = [round(num, 6) for num in bond_length_list]
# print(bond_length_list)
# Angle Parameters from QM files
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_qm_pdb)
atom_name_list = ppdb.df["ATOM"]["atom_number"].values.tolist()
# atom_name_list = [i - 1 for i in atom_name_list]
no_host_atoms = get_num_host_atoms(self.host_pdb)
atom_name_list = [i - 1 + no_host_atoms for i in atom_name_list]
# print(atom_name_list)
df = pd.read_csv(
self.angle_parameter_file_guest, header=None, delimiter=r"\s+"
)
df.columns = [
"angle",
"k_angle",
"angle_degrees",
"angle_1",
"angle_2",
"angle_3",
]
# print(df.head())
angle_1_list = df["angle_1"].values.tolist()
angle_1_list = [x - 1 + min(atom_name_list) for x in angle_1_list]
# print(angle_1_list)
angle_2_list = df["angle_2"].values.tolist()
angle_2_list = [x - 1 + min(atom_name_list) for x in angle_2_list]
# print(angle_2_list)
angle_3_list = df["angle_3"].values.tolist()
angle_3_list = [x - 1 + min(atom_name_list) for x in angle_3_list]
# print(angle_3_list)
k_angle_list = df["k_angle"].values.tolist()
k_angle_list = [
i * KCAL_MOL_PER_KJ_MOL for i in k_angle_list
] # kcal/mol * radian^2 to kJ/mol * radian^2
k_angle_list = [round(num, 6) for num in k_angle_list]
# print(k_angle_list)
angle_list = df["angle_degrees"].values.tolist()
angle_list = [i * RADIANS_PER_DEGREE for i in angle_list]
angle_list = [round(num, 6) for num in angle_list]
# print(angle_list)
xml = open(self.guest_qm_params_file, "w")
xml.write("Begin writing the Bond Parameters" + "\n")
# TODO: use string formatting and templates to write these lines
for i in range(len(k_bond_list)):
xml.write(
" "
+ "<Bond"
+ " "
+ "d="
+ '"'
+ str(bond_length_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_bond_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(bond_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(bond_2_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Bond Parameters" + "\n")
xml.write("Begin writing the Angle Parameters" + "\n")
for i in range(len(k_angle_list)):
xml.write(
" "
+ "<Angle"
+ " "
+ "a="
+ '"'
+ str(angle_list[i])
+ '"'
+ " "
+ "k="
+ '"'
+ str(k_angle_list[i])
+ '"'
+ " "
+ "p1="
+ '"'
+ str(angle_1_list[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(angle_2_list[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(angle_3_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Angle Parameters" + "\n")
xml.write("Begin writing the Charge Parameters" + "\n")
for i in range(len(qm_charges)):
xml.write(
"<Particle"
+ " "
+ "q="
+ '"'
+ str(qm_charges[i])
+ '"'
+ " "
+ "eps="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "sig="
+ '"'
+ str(0.00)
+ '"'
+ " "
+ "atom="
+ '"'
+ str(atom_name_list[i])
+ '"'
+ "/>"
+ "\n"
)
xml.write("Finish writing the Charge Parameters" + "\n")
xml.close()
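    # The writer above converts the QM-derived values before emitting them:
    # bond force constants from kcal/mol*A^2 to kJ/mol*nm^2, bond lengths
    # from Angstrom to nm (the division by 10), angle force constants from
    # kcal/mol*rad^2 to kJ/mol*rad^2 and equilibrium angles from degrees to
    # radians, using the module-level constants KCAL_MOL_PER_KJ_MOL,
    # ANGSTROMS_PER_NM and RADIANS_PER_DEGREE (assumed to be defined earlier
    # in this file).  For example, a QM bond constant k in kcal/mol*A^2 is
    # written as k * KCAL_MOL_PER_KJ_MOL * ANGSTROMS_PER_NM**2.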
def write_intermediate_reparameterised_system_xml(self):
"""
Writes a reparameterised XML force field file for the
system but without the QM obtained charges.
"""
# Bond Parameters
f_params = open(self.guest_qm_params_file, "r")
lines_params = f_params.readlines()
# Bond Parameters
for i in range(len(lines_params)):
if "Begin writing the Bond Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params[i]:
to_end = int(i)
bond_params = lines_params[to_begin + 1 : to_end]
index_search_replace_bond = []
for i in bond_params:
bond_line_to_replace = i
# print(bond_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_bond = [comb_1, comb_2]
# print(comb_list_bond)
list_search_bond = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
]
# print(list_search_bond)
for j in range(len(list_search_bond)):
if list_search_bond[j] != []:
to_add = (list_search_bond[j], i)
# print(to_add)
index_search_replace_bond.append(to_add)
# Angle Parameters
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
index_search_replace_angle = []
for i in angle_params:
angle_line_to_replace = i
# print(angle_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
re.findall("\d*\.?\d+", i)[7],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_3 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_4 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_5 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_6 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_angle = [comb_1, comb_2, comb_3, comb_4, comb_5, comb_6]
# print(comb_list_angle)
list_search_angle = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
search_in_file(file=self.system_xml, word=comb_3),
search_in_file(file=self.system_xml, word=comb_4),
search_in_file(file=self.system_xml, word=comb_5),
search_in_file(file=self.system_xml, word=comb_6),
]
# print(list_search_angle)
for j in range(len(list_search_angle)):
if list_search_angle[j] != []:
to_add = (list_search_angle[j], i)
# print(to_add)
index_search_replace_angle.append(to_add)
f_org = open(self.system_xml)
lines = f_org.readlines()
for i in range(len(index_search_replace_bond)):
line_number = index_search_replace_bond[i][0][0][0] - 1
line_to_replace = index_search_replace_bond[i][0][0][1]
line_to_replace_with = index_search_replace_bond[i][1]
lines[line_number] = line_to_replace_with
for i in range(len(index_search_replace_angle)):
line_number = index_search_replace_angle[i][0][0][0] - 1
line_to_replace = index_search_replace_angle[i][0][0][1]
line_to_replace_with = index_search_replace_angle[i][1]
lines[line_number] = line_to_replace_with
f_cop = open(self.reparameterised_intermediate_system_xml_file, "w")
for i in lines:
f_cop.write(i)
f_cop.close()
def write_reparameterised_system_xml(self):
"""
Writes a reparameterised XML force field file for the system.
"""
# Bond Parameters
with open(self.guest_qm_params_file, "r") as f_params:
lines_params = f_params.readlines()
# Bond Parameters
for i in range(len(lines_params)):
if "Begin writing the Bond Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Bond Parameters" in lines_params[i]:
to_end = int(i)
bond_params = lines_params[to_begin + 1 : to_end]
index_search_replace_bond = []
# TODO: again, use string formatting.
for i in bond_params:
bond_line_to_replace = i
# print(bond_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_bond = [comb_1, comb_2]
# print(comb_list_bond)
list_search_bond = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
]
# print(list_search_bond)
for j in range(len(list_search_bond)):
if list_search_bond[j] != []:
to_add = (list_search_bond[j], i)
# print(to_add)
index_search_replace_bond.append(to_add)
# Angle Parameters
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
index_search_replace_angle = []
# TODO: use string formatting (generalize to function?)
for i in angle_params:
angle_line_to_replace = i
# print(angle_line_to_replace)
atom_number_list = [
re.findall("\d*\.?\d+", i)[3],
re.findall("\d*\.?\d+", i)[5],
re.findall("\d*\.?\d+", i)[7],
]
# print(atom_number_list)
comb_1 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_2 = (
"p1="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_3 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[2]
+ '"'
+ "/>"
)
comb_4 = (
"p1="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_5 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[0]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[1]
+ '"'
+ "/>"
)
comb_6 = (
"p1="
+ '"'
+ atom_number_list[2]
+ '"'
+ " "
+ "p2="
+ '"'
+ atom_number_list[1]
+ '"'
+ " "
+ "p3="
+ '"'
+ atom_number_list[0]
+ '"'
+ "/>"
)
comb_list_angle = [comb_1, comb_2, comb_3, comb_4, comb_5, comb_6]
# print(comb_list_angle)
list_search_angle = [
search_in_file(file=self.system_xml, word=comb_1),
search_in_file(file=self.system_xml, word=comb_2),
search_in_file(file=self.system_xml, word=comb_3),
search_in_file(file=self.system_xml, word=comb_4),
search_in_file(file=self.system_xml, word=comb_5),
search_in_file(file=self.system_xml, word=comb_6),
]
# print(list_search_angle)
for j in range(len(list_search_angle)):
if list_search_angle[j] != []:
to_add = (list_search_angle[j], i)
# print(to_add)
index_search_replace_angle.append(to_add)
f_org = open(self.system_xml)
lines = f_org.readlines()
for i in range(len(index_search_replace_bond)):
line_number = index_search_replace_bond[i][0][0][0] - 1
line_to_replace = index_search_replace_bond[i][0][0][1]
line_to_replace_with = index_search_replace_bond[i][1]
lines[line_number] = line_to_replace_with
for i in range(len(index_search_replace_angle)):
line_number = index_search_replace_angle[i][0][0][0] - 1
line_to_replace = index_search_replace_angle[i][0][0][1]
line_to_replace_with = index_search_replace_angle[i][1]
lines[line_number] = line_to_replace_with
f_cop = open(self.reparameterised_intermediate_system_xml_file, "w")
for i in lines:
f_cop.write(i)
f_cop.close()
f_params = open(self.guest_qm_params_file)
lines_params = f_params.readlines()
# Charge Parameters
for i in range(len(lines_params)):
if "Begin writing the Charge Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Charge Parameters" in lines_params[i]:
to_end = int(i)
charge_params = lines_params[to_begin + 1 : to_end]
non_bonded_index = []
for k in charge_params:
non_bonded_index.append(int(re.findall("[-+]?\d*\.\d+|\d+", k)[3]))
charge_for_index = []
for k in charge_params:
charge_for_index.append(
float(re.findall("[-+]?\d*\.\d+|\d+", k)[0])
)
xml_off = open(self.system_xml)
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
nonbond_params = xml_off_lines[to_begin + 4 : to_end - 1]
# print(len(nonbond_params))
        f_non_bonded = open(self.system_xml_non_bonded_file, "w")
        for x in nonbond_params:
            f_non_bonded.write(x)
        # Close the write handle so the buffer is flushed before re-reading.
        f_non_bonded.close()
        f_non_bonded = open(self.system_xml_non_bonded_file)
lines_non_bonded = f_non_bonded.readlines()
# print(len(lines_non_bonded))
lines_non_bonded_to_write = []
for i in range(len(non_bonded_index)):
line_ = lines_non_bonded[non_bonded_index[i]]
# print(line_)
eps = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[0])
sig = float(re.findall("[-+]?\d*\.\d+|\d+", line_)[2])
line_to_replace = (
" "
+ "<Particle "
+ "eps="
+ '"'
+ str(eps)
+ '"'
+ " "
+ "q="
+ '"'
+ str(charge_for_index[i])
+ '"'
+ " "
+ "sig="
+ '"'
+ str(sig)
+ '"'
+ "/>"
)
lines_non_bonded_to_write.append(line_to_replace)
data_ = list(zip(non_bonded_index, lines_non_bonded_to_write))
df_non_bonded_params = pd.DataFrame(
data_, columns=["line_index", "line"]
)
# print(df_non_bonded_params.head())
f_non_bonded_ = open(self.system_xml_non_bonded_file)
lines_non_bonded_ = f_non_bonded_.readlines()
for i in range(len(lines_non_bonded_)):
if i in non_bonded_index:
lines_non_bonded_[i] = (
df_non_bonded_params.loc[
df_non_bonded_params.line_index == i, "line"
].values[0]
) + "\n"
# print(len(lines_non_bonded_))
f_write_non_bonded_reparams = open(
self.system_xml_non_bonded_reparams_file, "w"
)
for p in range(len(lines_non_bonded_)):
f_write_non_bonded_reparams.write(lines_non_bonded_[p])
f_write_non_bonded_reparams.close()
f_ = open(self.system_xml_non_bonded_reparams_file)
lines_ = f_.readlines()
        print(len(lines_) == len(lines_non_bonded))  # sanity check: should print True
xml_off = open(self.reparameterised_intermediate_system_xml_file)
xml_off_lines = xml_off.readlines()
for i in range(len(xml_off_lines)):
if "<GlobalParameters/>" in xml_off_lines[i]:
to_begin = int(i)
if "<Exceptions>" in xml_off_lines[i]:
to_end = int(i)
lines_before_params = xml_off_lines[: to_begin + 4]
f__ = open(self.system_xml_non_bonded_reparams_file)
lines_params_non_bonded = f__.readlines()
lines_after_params = xml_off_lines[to_end - 1 :]
f_reparams_xml = open(self.reparameterised_system_xml_file, "w")
for x in lines_before_params:
f_reparams_xml.write(x)
for x in lines_params_non_bonded:
f_reparams_xml.write(x)
for x in lines_after_params:
f_reparams_xml.write(x)
f_reparams_xml.close()
def write_torsional_reparams_intermediate(self):
"""
        Generates an XML force field file for the system (without the
        QM charges) with reparameterized torsional parameters of the ligand.
"""
no_host_atoms = get_num_host_atoms(self.host_pdb)
xml_tor = open(self.reparameterized_torsional_params_file, "r")
xml_tor_lines = xml_tor.readlines()
xml_tor_lines_renum = []
for i in xml_tor_lines:
i = i.replace(
"p1=" + '"' + str(int(re.findall("\d*\.?\d+", i)[2])) + '"',
"p1="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[2]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p2=" + '"' + str(int(re.findall("\d*\.?\d+", i)[4])) + '"',
"p2="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[4]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p3=" + '"' + str(int(re.findall("\d*\.?\d+", i)[6])) + '"',
"p3="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[6]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p4=" + '"' + str(int(re.findall("\d*\.?\d+", i)[8])) + '"',
"p4="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[8]) + no_host_atoms))
+ '"',
)
xml_tor_lines_renum.append(i)
non_zero_k_tor = []
for i in xml_tor_lines_renum:
to_find = "k=" + '"' + "0.0" + '"'
if to_find not in i:
non_zero_k_tor.append(i)
# print(non_zero_k_tor)
p1 = []
for i in range(len(non_zero_k_tor)):
p1.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[2]))
# print(p1)
p2 = []
for i in range(len(non_zero_k_tor)):
p2.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[4]))
# print(p2)
p3 = []
for i in range(len(non_zero_k_tor)):
p3.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[6]))
# print(p3)
p4 = []
for i in range(len(non_zero_k_tor)):
p4.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[8]))
# print(p4)
periodicity = []
for i in range(len(non_zero_k_tor)):
periodicity.append(
int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[9])
)
# print(periodicity)
xml_tor_reparams = open(
self.reparameterised_intermediate_system_xml_file, "r"
)
xml_tor_reparams_lines = xml_tor_reparams.readlines()
for j in range(len(xml_tor_reparams_lines)):
for i in range(len(non_zero_k_tor)):
to_find_tor = (
"p1="
+ '"'
+ str(p1[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(p2[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(p3[i])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(p4[i])
+ '"'
+ " "
+ "periodicity="
+ '"'
+ str(periodicity[i])
+ '"'
)
if to_find_tor in xml_tor_reparams_lines[j]:
print(xml_tor_reparams_lines[j])
xml_tor_reparams_lines[j] = non_zero_k_tor[i]
with open(
self.reparameterised_intermediate_torsional_system_xml_file, "w"
) as f:
for i in xml_tor_reparams_lines:
f.write(i)
def write_torsional_reparams(self):
"""
        Generates an XML force field file for the system with reparameterized
torsional parameters of the ligand.
"""
no_host_atoms = get_num_host_atoms(self.host_pdb)
with open(self.reparameterized_torsional_params_file, "r") as xml_tor:
xml_tor_lines = xml_tor.readlines()
xml_tor_lines_renum = []
# TODO: string formatting and clean up this code to be more concise
for i in xml_tor_lines:
i = i.replace(
"p1=" + '"' + str(int(re.findall("\d*\.?\d+", i)[2])) + '"',
"p1="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[2]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p2=" + '"' + str(int(re.findall("\d*\.?\d+", i)[4])) + '"',
"p2="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[4]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p3=" + '"' + str(int(re.findall("\d*\.?\d+", i)[6])) + '"',
"p3="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[6]) + no_host_atoms))
+ '"',
)
i = i.replace(
"p4=" + '"' + str(int(re.findall("\d*\.?\d+", i)[8])) + '"',
"p4="
+ '"'
+ str(int(int(re.findall("\d*\.?\d+", i)[8]) + no_host_atoms))
+ '"',
)
xml_tor_lines_renum.append(i)
non_zero_k_tor = []
for i in xml_tor_lines_renum:
to_find = "k=" + '"' + "0.0" + '"'
if to_find not in i:
non_zero_k_tor.append(i)
# print(non_zero_k_tor)
p1 = []
for i in range(len(non_zero_k_tor)):
p1.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[2]))
# print(p1)
p2 = []
for i in range(len(non_zero_k_tor)):
p2.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[4]))
# print(p2)
p3 = []
for i in range(len(non_zero_k_tor)):
p3.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[6]))
# print(p3)
p4 = []
for i in range(len(non_zero_k_tor)):
p4.append(int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[8]))
# print(p4)
periodicity = []
for i in range(len(non_zero_k_tor)):
periodicity.append(
int(re.findall("\d*\.?\d+", non_zero_k_tor[i])[9])
)
# print(periodicity)
xml_tor_reparams = open(self.reparameterised_system_xml_file, "r")
xml_tor_reparams_lines = xml_tor_reparams.readlines()
for j in range(len(xml_tor_reparams_lines)):
for i in range(len(non_zero_k_tor)):
to_find_tor = (
"p1="
+ '"'
+ str(p1[i])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(p2[i])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(p3[i])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(p4[i])
+ '"'
+ " "
+ "periodicity="
+ '"'
+ str(periodicity[i])
+ '"'
)
if to_find_tor in xml_tor_reparams_lines[j]:
print(xml_tor_reparams_lines[j])
xml_tor_reparams_lines[j] = non_zero_k_tor[i]
with open(self.reparameterised_torsional_system_xml_file, "w") as f:
for i in xml_tor_reparams_lines:
f.write(i)
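    # The two save_* methods below convert the (re)parameterised XML systems
    # into Amber prmtop/inpcrd files via ParmEd and then cross-check them.
    # parmed.openmm.energy_decomposition_system() is assumed to return a list
    # of (force_name, energy) pairs; the nested list comprehension flattens
    # that list so that list_to_dict() (a helper defined or imported earlier
    # in this file) can be used to look up individual force terms such as
    # "HarmonicBondForce".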
def save_amber_params_non_qm_charges(self):
"""
        Saves Amber-generated topology files for the system
without the QM charges.
"""
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
openmm_system.save(self.prmtop_system_non_params, overwrite=True)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(self.inpcrd_system_non_params, overwrite=True)
parm = parmed.load_file(
self.prmtop_system_non_params, self.inpcrd_system_non_params
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(self.non_reparameterised_system_xml_file),
)
xml_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in xml_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_non_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
prmtop_energy_decomposition_value = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("PeriodicTorsionForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_non_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
print(df_compare)
if self.load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(self.system_pdb, structure=True).topology,
parmed.load_file(
self.reparameterised_intermediate_torsional_system_xml_file
),
)
if self.load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(self.system_pdb).topology,
parmed.load_file(
self.reparameterised_intermediate_torsional_system_xml_file
),
)
openmm_system.save(
self.prmtop_system_intermediate_params, overwrite=True
)
openmm_system.coordinates = parmed.load_file(
self.system_pdb, structure=True
).coordinates
openmm_system.save(
self.inpcrd_system_intermediate_params, overwrite=True
)
parm = parmed.load_file(
self.prmtop_system_intermediate_params,
self.inpcrd_system_intermediate_params,
)
xml_energy_decomposition = parmed.openmm.energy_decomposition_system(
openmm_system,
parmed.load_file(
self.reparameterised_intermediate_torsional_system_xml_file
),
)
        xml_energy_dict = list_to_dict(
            [
                item
                for sublist in [
                    list(elem) for elem in xml_energy_decomposition
                ]
                for item in sublist
            ]
        )
        xml_energy_decomposition_value = [
            xml_energy_dict.get("HarmonicBondForce"),
            xml_energy_dict.get("HarmonicAngleForce"),
            xml_energy_dict.get("PeriodicTorsionForce"),
            xml_energy_dict.get("NonbondedForce"),
        ]
xml_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_xml = pd.DataFrame(
list(
zip(
xml_energy_decomposition_list,
xml_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_xml_params"],
)
df_energy_xml = df_energy_xml.set_index("Energy_term")
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
        prmtop_energy_dict = list_to_dict(
            [
                item
                for sublist in [
                    list(elem) for elem in prmtop_energy_decomposition
                ]
                for item in sublist
            ]
        )
        prmtop_energy_decomposition_value = [
            prmtop_energy_dict.get("HarmonicBondForce"),
            prmtop_energy_dict.get("HarmonicAngleForce"),
            prmtop_energy_dict.get("PeriodicTorsionForce"),
            prmtop_energy_dict.get("NonbondedForce"),
        ]
prmtop_energy_decomposition_list = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_prmtop = pd.DataFrame(
list(
zip(
prmtop_energy_decomposition_list,
prmtop_energy_decomposition_value,
)
),
columns=["Energy_term", "Energy_prmtop_params"],
)
df_energy_prmtop = df_energy_prmtop.set_index("Energy_term")
        df_compare = pd.concat([df_energy_xml, df_energy_prmtop], axis=1)
        print(df_compare)
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
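        # the resulting index contains the tuples ('foo', 'one'),
        # ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one')
        # and ('qux', 'two')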
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
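        # labels are stored with the smallest signed integer dtype that can
        # index the corresponding level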
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
        # label changing when a level holds a larger number of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
        # scalar data shouldn't error; it should demand list-like instead
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
        # scalar data shouldn't error; it should demand list-like instead
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
        # scalar data shouldn't error; it should demand list-like instead
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
        # Levels should be (at least) shallow-copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
        # For labels it doesn't matter which way they are copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # For names it doesn't matter which way they are copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
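        # level.take(labels) expands the integer label codes back into the
        # flat per-level values that originally built the index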
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
        # 0.7.3 -> 0.8.0 pickle format migration
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
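        # a key that fixes only the first two levels returns a slice plus
        # the remaining level as a new index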
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
        # keys not present in the index are located via searchsorted
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
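        # index[5] is not contained in idx1, hence the trailing -1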
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME data types changes to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
( | u('x') | pandas.compat.u |
import pandas as pd
from sodapy import Socrata
import datetime
import definitions
# global variables for main data:
hhs_data, test_data, nyt_data_us, nyt_data_state, max_hosp_date = [],[],[],[],[]
"""
get_data()
Fetches data from API, filters, cleans, and combines with provisional.
After running, global variables are filled for use in subsequent functions
"""
def get_data():
global nyt_data_us
global nyt_data_state
global test_data
global hhs_data
global max_hosp_date
nyt_data_us = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us.csv")
nyt_data_state = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-states.csv")
client = Socrata("healthdata.gov", None)
results = client.get("g62h-syeh", limit=2000000)
test_results = client.get("j8mb-icvb", limit=2000000)
print("LOG: Fetched all raw data")
# Filter data to get columns of interest
hhs_data = pd.DataFrame.from_records(results)[['state', 'date', 'inpatient_beds_used_covid']]
hhs_data.inpatient_beds_used_covid = hhs_data.inpatient_beds_used_covid.fillna(0)
hhs_data = hhs_data.astype({'inpatient_beds_used_covid': 'int32'})
test_data = pd.DataFrame.from_records(test_results)[['state', 'date', 'overall_outcome', 'new_results_reported']]
test_data.new_results_reported = test_data.new_results_reported.fillna(0)
test_data = test_data.astype({'new_results_reported': 'int32'})
print("LOG: Filtered Data")
# For provisional data, fetch archives updated after the most recent date in the HHS time series
max_date = hhs_data.date.max()
max_hosp_date = max_date
provisional = client.get("4cnb-m4rz", limit=2000000, where=f"update_date > '{max_date}'")
hhs_provisional = pd.DataFrame.from_records(provisional)[['update_date', 'archive_link']]
hhs_provisional.update_date = hhs_provisional.update_date.apply(lambda x: x[:10])
hhs_provisional.update_date = pd.to_datetime(hhs_provisional.update_date)
# Gets last archive of every day
group = hhs_provisional.groupby(['update_date'])
hhs_provisional = group.last()
# Add provisional data to HHS data
frames = []
for a in hhs_provisional.iterrows():
date = a[0]
url = a[1].item()['url']
df = pd.read_csv(url)[['state', 'inpatient_beds_used_covid']]
df['date']=date
if date > pd.Timestamp(max_date): # Avoids double counting if provisional update came after real update
frames.append(df)
frames.append(hhs_data)
hhs_data = (pd.concat(frames))
print("LOG: Added HHS Provisional data")
# Make date columns in proper format
# hhs_data.date = hhs_data.date.apply(lambda x: x[:10])
hhs_data.date= pd.to_datetime(hhs_data.date)
# hhs_data.to_csv("../data/hospitalizations.csv")
print("LOG: Wrote HHS data to CSV")
test_data.date = test_data.date.apply(lambda x: x[:10])
test_data.date = pd.to_datetime(test_data.date)
nyt_data_us.date = pd.to_datetime(nyt_data_us.date)
nyt_data_state.date = pd.to_datetime(nyt_data_state.date)
print("LOG: Done getting data")
"""
get_state_cases
Creates dataframe of time series date and cases for given state
inputs:
state_codes: List of 2-letter codes of states to query
start_date (pd.Timestamp): starting date, defaults to 1-1-2020
end_date (pd.Timestamp): ending date, defaults to today
normalize (bool): if True, scale counts to cases per million residents of the selected states
returns:
df with 'date' and 'cases' columns
"""
def get_state_cases(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
input_states = [definitions.states[s] for s in state_codes]
state_data = nyt_data_state[nyt_data_state.state.isin(input_states)][:]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
case_sum = day_data.cases.sum() / states_population * 1000000
else:
case_sum = day_data.cases.sum()
newRow = {'date': curr_date, 'cases': case_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
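# Example (illustrative): with normalize=True the daily total is scaled to cases
# per million residents of the selected states; normalize=False returns the raw
# case counts from the NYT data.
#
#   raw = get_state_cases(['CA', 'WA'], normalize=False)
#   per_million = get_state_cases(['CA', 'WA'])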
def get_us_cases(start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
us_data = nyt_data_us[(nyt_data_us.date >= start_date) & (nyt_data_us.date <= end_date)]
return us_data[['date', 'cases']]
"""
get_state_deaths
Same as get_state_cases, but aggregates daily deaths instead of cases.
"""
def get_state_deaths(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
input_states = [definitions.states[s] for s in state_codes]
state_data = nyt_data_state[nyt_data_state.state.isin(input_states)]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
case_sum = day_data.deaths.sum() / states_population * 1000000
else:
case_sum = day_data.deaths.sum()
newRow = {'date': curr_date, 'deaths': case_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return | pd.DataFrame(lst) | pandas.DataFrame |
'''
Utility scripts
'''
import argparse
import copy
import logging
import sys
import typing
import pandas as pd
_logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def time_granularity_value_to_stringfy_time_format(granularity_int: int) -> str:
try:
granularity_int = int(granularity_int)
except ValueError:
raise ValueError("The given granularity is not int format!")
granularity_dict = {
14: "%Y-%m-%d %H:%M:%S",
13: "%Y-%m-%d %H:%M",
12: "%Y-%m-%d %H",
11: "%Y-%m-%d",
10: "%Y-%m",
9: "%Y"
}
if granularity_int in granularity_dict:
return granularity_dict[granularity_int]
else:
_logger.warning("Unknown time granularity value {}! Falling back to the second-level format.".format(str(granularity_int)))
return granularity_dict[14]
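# Example (illustrative): granularity 11 maps to a day-level format string.
#
#   fmt = time_granularity_value_to_stringfy_time_format(11)   # "%Y-%m-%d"
#   pd.Timestamp("2020-05-17 13:45:00").strftime(fmt)          # "2020-05-17"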
def get_time_granularity(time_column: pd.DataFrame) -> str:
if "datetime" not in time_column.dtype.name:
try:
time_column = | pd.to_datetime(time_column) | pandas.to_datetime |
import copy
from typing import Optional, Collection, Any, Dict, Tuple
from causalpy.bayesian_graphs.scm import (
SCM,
NoiseGenerator,
Assignment,
IdentityAssignment,
MaxAssignment,
SignSqrtAssignment,
SinAssignment,
)
import networkx as nx
import pandas as pd
import numpy as np
class SumAssignment(Assignment):
def __init__(self, *assignments, offset: float = 0.0):
super().__init__()
self.assignment = assignments
self.offset = offset
self.coefficients = np.ones(len(assignments))
def __call__(self, noise, *args, **kwargs):
args = self.parse_call_input(*args, **kwargs)
return noise + self.coefficients @ args + self.offset
def __len__(self):
return len(self.coefficients)
def function_str(self, variable_names: Optional[Collection[str]] = None):
rep = "N"
var_strs = [
f"{assignment.function_str([var])}"
for assignment, var in zip(self.assignment, variable_names[1:])
]
if var_strs:
rep += f" + {' + '.join(var_strs)}"
return rep
class ProductAssignment(Assignment):
def __init__(self, *assignments, offset: float = 0.0):
super().__init__()
self.assignment = assignments
self.offset = offset
self.coefficients = np.ones(len(assignments))
def __call__(self, noise, *args, **kwargs):
args = self.parse_call_input(*args, **kwargs)
out = noise + self.offset
if args:
out += np.prod(args, axis=0)
return out
def __len__(self):
return len(self.coefficients)
def function_str(self, variable_names: Optional[Collection[str]] = None):
rep = "N"
var_strs = [
f"{assignment.function_str([var])}"
for assignment, var in zip(self.assignment, variable_names[1:])
]
if var_strs:
rep += f" + {' * '.join(var_strs)}"
return rep
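# Sketch (illustrative; the constructor arguments mirror the calls made in
# HeinzeData.get_scm below):
#
#   assign = SumAssignment(IdentityAssignment(coefficient=1.0),
#                          IdentityAssignment(coefficient=-1.0))
#   print(assign.function_str(["N", "X_1", "X_2"]))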
class HeinzeData:
_possible_values_ = dict(
sample_size=[100, 200, 500, 2000, 5000],
target=[f"X_{i}" for i in range(6)],
noise_df=[2, 3, 5, 10, 20, 50, 100],
multiplicative=[True, False],
shift=[True, False],
meanshift=[0, 0.1, 0.2, 0.5, 1, 2, 5, 10],
strength=[0, 0.1, 0.2, 0.5, 1, 2, 5, 10],
mechanism=[
IdentityAssignment,
MaxAssignment,
SignSqrtAssignment,
SinAssignment,
],
interventions=["all", "rand", "close"],
)
def __init__(
self, config: Optional[Dict[str, Any]] = None, seed: Optional[int] = None
):
self.seed = seed
self.rng = np.random.default_rng(self.seed)
self.config = (
self.draw_config() if config is None else self.verify_config(config)
)
self.scm = self.get_scm()
self.intervention_values = dict()
def verify_config(self, config: Dict[str, Any]):
for key in config.keys():
if config[key] not in self._possible_values_[key]:
raise ValueError(
f"Value '{config[key]}' of key '{key}' not within range of allowed values."
)
return config
def draw_config(self):
rng = np.random.default_rng(self.seed)
poss_vals = self._possible_values_
config = dict()
# uniform draws
for param in self._possible_values_.keys():
config[param] = rng.choice(poss_vals[param])
return config
def get_scm(self, noise_seed: Optional[int] = None):
config = self.config
assignment_map = dict()
def get_seed(i):
if noise_seed is not None:
return noise_seed + i
return None
binary_op_assignment = (
ProductAssignment if config["multiplicative"] else SumAssignment
)
mechanism = config["mechanism"]
coeffs = dict()
coeffs[(0, 1)] = 1
coeffs[(0, 2)] = 1
coeffs[(1, 2)] = -1
coeffs[(2, 3)] = -1
coeffs[(3, 5)] = -1
coeffs[(2, 5)] = 1
coeffs[(4, 5)] = 1
df = config["noise_df"]
assignment_map["X_0"] = (
[],
binary_op_assignment(),
NoiseGenerator("standard_t", df=df, seed=get_seed(0)),
)
assignment_map["X_1"] = (
["X_0"],
binary_op_assignment(mechanism(coefficient=coeffs[0, 1])),
NoiseGenerator("standard_t", df=df, seed=get_seed(1)),
)
assignment_map["X_2"] = (
["X_0", "X_1"],
binary_op_assignment(
mechanism(coefficient=coeffs[0, 2]), mechanism(coefficient=coeffs[1, 2])
),
NoiseGenerator("standard_t", df=df, seed=get_seed(2)),
)
assignment_map["X_3"] = (
["X_2"],
binary_op_assignment(mechanism(coefficient=coeffs[2, 3])),
NoiseGenerator("standard_t", df=df, seed=get_seed(3)),
)
assignment_map["X_4"] = (
[],
binary_op_assignment(),
NoiseGenerator("standard_t", df=df, seed=get_seed(4)),
)
assignment_map["X_5"] = (
["X_3", "X_2", "X_4"],
binary_op_assignment(
mechanism(coefficient=coeffs[3, 5]),
mechanism(coefficient=coeffs[2, 5]),
mechanism(coefficient=coeffs[4, 5]),
),
NoiseGenerator("standard_t", df=df, seed=get_seed(5)),
)
return SCM(assignment_map)
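# For reference, the coefficient keys above encode the graph
#   X_0 -> X_1, X_0 -> X_2, X_1 -> X_2, X_2 -> X_3, X_2 -> X_5, X_3 -> X_5, X_4 -> X_5
# with X_0 and X_4 as noise-only root nodes.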
def set_intervention_values(self, intervention_number: int = 1):
try:
return self.intervention_values[intervention_number]
except KeyError:
config = self.config
interv_setting = config["interventions"]
target = config["target"]
meanshift = config["meanshift"]
scale = config["strength"]
if interv_setting == "all":
variables = [var for var in self.scm.graph.nodes if var != target]
values = (
self.rng.standard_t(size=len(variables), df=config["noise_df"])
* scale
+ meanshift
)
elif interv_setting == "rand":
parents = list(self.scm[target][0])
descendants = list(
nx.algorithms.dag.descendants(self.scm.graph, target)
)
parent = [self.rng.choice(parents)] if parents else []
descendant = [self.rng.choice(descendants)] if descendants else []
variables = parent + descendant
values = (
self.rng.standard_t(size=len(variables), df=config["noise_df"])
* scale
+ meanshift
)
else:
parents = list(self.scm[target][0])
children = list(self.scm.graph.successors(target))
parent = [self.rng.choice(parents)] if parents else []
child = [self.rng.choice(children)] if children else []
variables = parent + child
values = (
self.rng.standard_t(size=len(variables), df=config["noise_df"])
* scale
+ meanshift
)
self.intervention_values[intervention_number] = variables, values
def set_intervention(self, intervention_number: int):
variables, values = self.intervention_values[intervention_number]
if self.config["shift"]:
new_assignments = {
var: {"assignment": copy.deepcopy(self.scm[var][1]["assignment"])}
for var in variables
}
for (var, items), value in zip(new_assignments.items(), values):
items["assignment"].coefficient = value
self.scm.intervention(interventions=new_assignments)
else:
self.scm.do_intervention(variables, values)
def sample(self) -> Tuple[pd.DataFrame, str, np.ndarray]:
self.set_intervention_values(1)
self.set_intervention_values(2)
sample_size = self.config["sample_size"]
obs = [self.scm.sample(sample_size)]
envs = [0] * sample_size
vars = sorted(self.scm.get_variables())
for i in range(1, 3):
self.set_intervention(i)
obs.append(self.scm.sample(sample_size)[vars])
self.scm.undo_intervention()
envs += [i] * sample_size
obs = | pd.concat(obs, sort=True) | pandas.concat |
import pandas as pd
from settings import settings
from gensim.models.word2vec import Word2Vec
from utils import get_sequence, get_blocks, check_path
def generate_embeddings(ast_path, pairs_path, size=settings.vec_size):
source = pd.read_pickle(ast_path)
pairs = | pd.read_pickle(pairs_path) | pandas.read_pickle |
"""Construct the clean data set"""
import pandas as pd
from pathlib import PurePath
import numpy as np
import datetime as dt
from pandas.tseries.holiday import USFederalHolidayCalendar
from scipy.interpolate import interp1d
from sklearn.svm import SVR
#========================================================================#
# interpolation functions #
#========================================================================#
def det_interp(x, kind='linear'):
""" A helper function for deterministic time seres interpolation
Args
----
x -- a dummy variable for the dataframe's columns
kwargs
------
kind -- an interpolation kind accepted by scipy.interpolate.interp1d
return
------
interpolated values of the whole time series
"""
index = pd.Series(np.arange(x.shape[0]), index=x.index)
notnull = pd.notnull(x)
t = index[notnull]
y = x[notnull]
f = interp1d(t.values, y.values, kind=kind)
return pd.Series(f(index), index=x.index, name=x.name)
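# Example (illustrative): fill gaps column-wise in a time-indexed frame `df`.
#
#   filled = df.apply(det_interp, kind='cubic')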
def ml_interp(x, model, **model_kwargs):
""" A helper function for ML time seres interpolation
Args
----
x -- a dummy variable for the dataframe's columns
model -- a scikit learn model class
model_kwargs -- keyword arguments to pass to the model constructor
return
------
interpolated values of the whole time series
"""
index = pd.Series(np.arange(x.shape[0]), index=x.index)
notnull = pd.notnull(x)
t = index[notnull].values.reshape(-1, 1)
y = x[notnull]
regr = model(**model_kwargs)
regr.fit(t, y)
yhat = regr.predict(index.values.reshape(-1, 1))
return pd.Series(yhat, index=x.index, name=x.name)
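# Example (illustrative): the same column-wise pattern with a scikit-learn
# regressor; SVR is imported above, and the extra keyword arguments are passed
# through to its constructor.
#
#   filled = df.apply(ml_interp, model=SVR, kernel='rbf', C=10.0)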
def main():
#=====================================================================#
# Data import #
#=====================================================================#
root = PurePath()
raw_data = root / 'raw_data'
sentiment = root / 'sentiment_analysis'
# files
economics_file = 'economics.csv'
yields_file = 'FED-SVENY.csv'
sentiment_file = root / 'daily_sentiment_score.csv'
# import data
economics = pd.read_csv(raw_data / economics_file)
yields = pd.read_csv(raw_data / yields_file)
sent = pd.read_csv(sentiment / sentiment_file)
#=====================================================================#
# clean data #
#=====================================================================#
economics.index = pd.to_datetime(economics['sasdate'],
format="%m/%d/%Y")
economics = economics.iloc[:,1:] # drop date column
# nan strategy is to drop as of now
economics = economics[~(economics.apply(np.isnan)).apply(any, axis=1)]
economics = economics.iloc[:-9,:] # done by inspection
yields.index = | pd.to_datetime(yields['Date'], format="%Y-%m-%d") | pandas.to_datetime |
import json
import signal
from functools import wraps
from time import time
import warnings
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
warnings.simplefilter("ignore")
class TimeoutException(Exception):
pass
def timeout_handler(signum, frame):
raise TimeoutException
signal.signal(signal.SIGALRM, timeout_handler)
def timeit(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
start = time()
signal.alarm(60)
try:
result = fn(*args, **kwargs)
except TimeoutException:
pass
else:
signal.alarm(0)  # cancel the pending alarm once fn finishes in time
return time() - start
return wrapper
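# Example (illustrative): the decorated call returns its elapsed wall-clock time
# and is aborted via SIGALRM if it runs longer than 60 seconds.
#
#   @timeit
#   def run_benchmark(df):
#       df.describe()
#
#   elapsed = run_benchmark(test_df)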
with open("tests/test_data_py_vollib.json", "rb") as f:
d = json.load(f)
test_df = pd.DataFrame(d["data"], index=d["index"], columns=d["columns"])
test_df_calls = test_df.copy()
test_df_calls["flag"] = "c"
test_df_calls["q"] = 0
test_df_puts = test_df.copy()
test_df_puts["flag"] = "p"
test_df_puts["q"] = 0
test_df = | pd.concat((test_df_calls.iloc[:100], test_df_puts.iloc[:100]), 0) | pandas.concat |
import sys
import os
import traceback
from shapely.geometry import Point
import core.download as dlf
import pandas as pd
import geopandas as gpd
def err_to_parent(UDF):
def handling(connection, load, message):
try:
UDF(connection, load, message)
except Exception as e:
connection.send({"BREAK": e,
"TB": traceback.format_exc()})
handling.__name__ = UDF.__name__
return handling
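# Note (illustrative): when a wrapped worker raises, the parent end of the pipe
# receives {"BREAK": <exception>, "TB": <formatted traceback>} instead of the
# normal payload, so callers can check for the "BREAK" key after connection.recv().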
@err_to_parent
def simple_groupby(connection, load, message):
"""
Return the result of a Pandas groupby method for
the load dataframe.
Inputs:
load (must be DataFrame or GeoDataFrame)
message['col_name'], the series to group by
message['groupby_method'], may be count, mean, sum, or median
Output: sends the result of the method back up the pipe
"""
if 'groupby_method' in message:
groupby_method = message['groupby_method']
else:
raise ValueError('groupby_method must be sum, count, mean, or median')
if 'col_name' not in message:
raise ValueError("message['col_name'] not defined")
if type(load).__name__ not in ['DataFrame', 'GeoDataFrame']:
raise TypeError('load must be DataFrame or GeoDataFrame, not {}'.format(
type(load).__name__))
valid_methods = ['sum', 'count', 'mean', 'median']
if groupby_method not in(valid_methods):
raise ValueError(
'groupby must be sum, count, mean, or median , not {}'.format(
groupby_method))
if groupby_method == 'sum':
grouped = load.groupby(message['col_name']).sum()
if groupby_method == 'count':
grouped = load.groupby(message['col_name']).count()
if groupby_method == 'mean':
grouped = load.groupby(message['col_name']).mean()
if groupby_method == 'median':
grouped = load.groupby(message['col_name']).median()
connection.send(grouped)
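# Example message for simple_groupby (illustrative):
#
#   message = {'col_name': 'state', 'groupby_method': 'mean'}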
@err_to_parent
def points_from_latlong(connection, load, message):
"""
Render a lat and long column to shapely points.
Inputs: connection, load (a DataFrame or GeoDataFrame), message (see below)
Message:
message['lat'], the key/column name for lattitude listlike
message['long'], the key/column name for the longitude listlike
message['out_column'], name of new points column to be created
Output: sends the load back up the pipe with a new points column
"""
long = message['long']
lat = message['lat']
load[message['out_column']] = [Point(x, y) for x, y in zip(
load[long], load[lat]
)]
connection.send(load)
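# Example message for points_from_latlong (illustrative; column names are
# placeholders):
#
#   message = {'lat': 'latitude', 'long': 'longitude', 'out_column': 'points'}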
@err_to_parent
def col_to_datetime(connection, load=None, message=None):
# Convert each column listed in message['col_to_datetime'] to datetime objects
if type(load).__name__ == 'DataFrame':
if not isinstance(message['col_to_datetime'], list):
raise TypeError("message['col_to_datetime'] must be a list")
if message['col_to_datetime'] == []:
raise ValueError(
'List of columns to be converted to datetime object is empty')
for i in message['col_to_datetime']:
load[i] = | pd.to_datetime(i) | pandas.to_datetime |
# Choose a top-performing ETF from the previous week
## https://www.etf.com/etfanalytics/etf-finder
etf_list = ['QQQ', 'QLD', 'TQQQ', 'GDXD', 'SPY']
# Get best ticker performance of past 1 week
# def best_etf(etf_list):
# best_ticker_performance = 0
# best_ticker = ''
# for ticker in etf_list:
# ticker_yahoo = yf.Ticker(ticker)
# data = ticker_yahoo.history()
# last_quote = (data.tail(1)['Close'].iloc[0])
# last_7th_quote = (data.tail(7)['Close'].iloc[0]) # 7 means last 7 days
# last_week_performance = (last_quote - last_7th_quote) / last_7th_quote * 100
# if last_week_performance > best_ticker_performance:
# best_ticker_performance = last_week_performance
# best_ticker = ticker
# return(best_ticker, round(best_ticker_performance, 2))
import json
import requests
import pandas as pd
endpoint = "https://data.alpaca.markets/v1"
headers = json.loads(open("key.txt", 'r').read())
tickers = "ROXIF,EDU,TSP,PANW,PDD,BEKE,TAL,QFIN,TME,MPNGF,DADA,JD,DIDI,MPNGY,VNET,YY,ZH,YMM,BZ,CURV,FUTU,MLCO,TIGR,VIPS,FNMA"
def hist_data(symbols, timeframe="15Min", limit=200, start="", end="", after="", until=""):
"""
Returns historical bar data for a string of symbols seperated by comma.
Symbols should be in a string format separated by comma e.g. symbols = "MSFT,AMZN,GOOG".
"""
df_data = {}
bar_url = endpoint + "/bars/{}".format(timeframe)
params = {"symbols" : symbols,
"limit" : limit,
"start" : start,
"end" : end,
"after" : after,
"until" : until}
r = requests.get(bar_url, headers=headers, params=params)
json_dump = r.json()
for symbol in json_dump:
temp = | pd.DataFrame(json_dump[symbol]) | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
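    # epoch 1370745748 corresponds to 2013-06-09 02:42:28 UTC; integer, float
    # and NaN/iNaT inputs should all convert identically with unit='s'.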
    def test_to_datetime_unit(self):
        epoch = 1370745748
        s = Series([epoch + t for t in range(20)])
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)])
        assert_series_equal(result, expected)
        s = Series([epoch + t for t in range(20)]).astype(float)
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)])
        assert_series_equal(result, expected)
        s = Series([epoch + t for t in range(20)] + [iNaT])
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)] + [NaT])
        assert_series_equal(result, expected)
        s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)] + [NaT])
        assert_series_equal(result, expected)
        s = concat([Series([epoch + t for t in range(20)]).astype(float),
                    Series([np.nan])], ignore_index=True)
        result = to_datetime(s, unit='s')
        expected = Series([Timestamp('2013-06-09 02:42:28') +
                           timedelta(seconds=t) for t in range(20)] + [NaT])
        assert_series_equal(result, expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
        td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1, 2, 3])
        expected = pd.to_datetime(td, format='%b %y')
        result = td.apply(pd.to_datetime, format='%b %y')
        assert_series_equal(result, expected)
        td = pd.Series(['May 04', 'Jun 02', ''], index=[1, 2, 3])
        self.assertRaises(ValueError, lambda: pd.to_datetime(td, format='%b %y'))
        self.assertRaises(ValueError,
                          lambda: td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
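    # Arithmetic between a Timestamp-indexed series and one indexed by
    # datetime.date objects should align, i.e. the date index is promoted to
    # a DatetimeIndex rather than treated as plain object labels.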
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
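    # For the between_time checks below: the 5min range spans 1/1 00:00
    # through 1/5 00:00, so the 00:00-01:00 window holds 13 stamps on each of
    # the four full days plus the final 1/5 00:00 endpoint (13 * 4 + 1);
    # excluding the start drops the five 00:00 stamps, excluding the end drops
    # the four 01:00 stamps.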
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
        rng_ns = pd.DatetimeIndex(np.array([1380585623454345752,
                                            1380585612343234312]).astype("datetime64[ns]"))
        rng_ns_normalized = rng_ns.normalize()
        expected = pd.DatetimeIndex(np.array([1380585600000000000,
                                              1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
        fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
                  'is_month_start', 'is_month_end', 'is_quarter_start',
                  'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
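    # Timestamp is limited to the datetime64[ns] range (roughly the years
    # 1677 through 2262), so dates outside it should raise below.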
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
        # problematic layout: datetimes as the first MultiIndex level
        # used to trigger a date parse error in groupby
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
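        # by default the constructor takes a view on the int64 data, so
        # mutating the source array shows through; copy=True must decouple it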
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
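    # Interpolating by 'time' should give the same fractional values whether
    # the new points sit one day inside a 12-day grid or one hour inside a
    # 12-hour grid; the test below compares the two value arrays.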
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
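    # For the resample checks below: s holds the values 0-99 at one-second
    # spacing with seconds 10-29 masked to NaN, so the first minute averages
    # {0-9, 30-59} -> 34.5 and the second minute averages 60-99 -> 79.5.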
def test_period_resample(self):
# GH3609
        s = Series(range(100), index=date_range('20130101', freq='s', periods=100),
                   dtype='float')
        s[10:30] = np.nan
        expected = Series([34.5, 79.5],
                          index=[Period('2013-01-01 00:00', 'T'),
                                 Period('2013-01-01 00:01', 'T')])
        result = s.to_period().resample('T', kind='period')
        assert_series_equal(result, expected)
        result2 = s.resample('T', kind='period')
        assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
        # Index is moved back a day with the timezone conversion from UTC to
        # Pacific
        expected_index = (pd.period_range(start=start, end=end, freq='D') - 1)
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
        # GH 2658
        import datetime
        start = datetime.datetime.now()
        idx = DatetimeIndex(start=start, freq="1d", periods=10)
        df = DataFrame(lrange(10), index=idx)
        df["2013-01-14 23:44:34.437768-05:00":]  # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
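        # the value is seconds * 1e9 plus 1ms, 1us and 1ns expressed in
        # nanoseconds, so the nanosecond field must come out as 1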
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
        i2.union(i1)  # used to fail with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
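    # sampling at 24414 Hz gives int(1e9 / 24414) == 40960 ns between samples,
    # which is the '40960N' frequency checked by assert_index_parameters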
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
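    # For the snap test below: 1/1/2002 is a Tuesday, so the first three days
    # snap back to Monday 12/31/2001 and the remaining four snap forward to
    # Monday 1/7/2002, hence exp.repeat([3, 4]).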
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
self.assertEqual(result, expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
assert_series_equal(result, expected)
result = s['2005-1-1']
self.assertEqual(result, s.irow(0))
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
assert_series_equal(result, s.ix[:24])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60 * 4])
result = s['2005-1-1 20']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s['2005-1-1 20:00'], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
assert_series_equal(result, s.ix[:60])
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s[Timestamp('2005-1-1 23:59:00')], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slicing_with_multiindex(self):
# GH 4758
        # partial string indexing on a multi-index was buggy
df = DataFrame({'ACCOUNT':["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER':["ABC", "MNP", "XYZ", "XYZ"],
'val':[1,2,3,4]},
index=date_range("2013-06-19 09:30:00", periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([[1]],index=Index(['ABC'],name='TICKER'),columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
assert_frame_equal(result, expected)
expected = df_multi.loc[(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
self.assertRaises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(randn(1000, 1000), index=pd.date_range('2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.ix['2000-1-4']
result = df2.ix[pd.Timestamp('2000-1-4')]
assert_frame_equal(result, expected)
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq='2D')
offset = timedelta(2)
        values = np.array([snap + i * offset for i in range(n)])
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 8 08:53:30 2019
@author: rhou
"""
import warnings
warnings.filterwarnings("ignore")
import os, sys
import argparse
import matplotlib
matplotlib.use('agg')
import pandas as pd
import numpy as np
try:
import seaborn as sns
except ImportError:
sys.exit('\n\nError: seaborn module is missing, please install it before proceeding.')
try:
import igraph as ig
except ImportError:
sys.exit('\n\nError: igraph module is missing, please install it before proceeding.')
try:
import networkx as nx
except ImportError:
sys.exit('\n\nError: NetworkX module is missing, please install it before proceeding.')
try:
import pygraphviz as pgv
except ImportError:
sys.exit('\n\nError: PyGraphviz module is missing, please install it before proceeding.')
#filter adjacency matrix
def ChooseTopEdges(adjM, keepTopEdge):
if keepTopEdge == 0:
return adjM
edgeDict = {'s':[],'t':[],'v':[]}
for idx in adjM.index:
for col in adjM.columns:
edgeDict['s'].append(idx)
edgeDict['t'].append(col)
if adjM.loc[idx,col] <=0:
edgeDict['v'].append((-1.0) * adjM.loc[idx,col])
else:
edgeDict['v'].append(adjM.loc[idx,col])
edgeD = pd.DataFrame(edgeDict).sort_values(by=['v'], ascending=False)
edgeD = edgeD.head(keepTopEdge)
nadjM = pd.DataFrame(0.0, index=adjM.index,columns=adjM.index)
for idx in edgeD.index:
nadjM.loc[edgeD.loc[idx,['s']],edgeD.loc[idx,['t']]] = adjM.loc[edgeD.loc[idx,['s']],edgeD.loc[idx,['t']]]
return nadjM
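# Illustrative sketch only (this helper is hypothetical and never called by the
# script): how ChooseTopEdges behaves on a toy 3x3 adjacency matrix. Edges are
# ranked by absolute weight, the strongest keepTopEdge entries keep their
# original signed values, and every other cell of the returned matrix is 0.0.
def _example_choose_top_edges():
    toy = pd.DataFrame([[0.0, 2.5, -4.0],
                        [0.1, 0.0, 1.0],
                        [3.0, 0.2, 0.0]],
                       index=['A', 'B', 'C'], columns=['A', 'B', 'C'])
    # keepTopEdge=2 should keep only the A->C (-4.0) and C->A (3.0) entries
    return ChooseTopEdges(toy, keepTopEdge=2)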
# build delta adjacency matrix
def BuildDeltaAdjM(edgeDF, origlabels, labels, specificityThreshold, weightThreshold, frequencyThreshold, keepTopEdge):
edgeDF['Sending cluster'] = edgeDF['Sending cluster'].astype(str)
edgeDF['Target cluster'] = edgeDF['Target cluster'].astype(str)
adjM1 = pd.DataFrame(0.0, index=origlabels, columns=origlabels)
adjSpecM1 = pd.DataFrame(0.0, index=origlabels, columns=origlabels)
adjCountM1 = pd.DataFrame(0, index=origlabels, columns=origlabels)
adjM2 = pd.DataFrame(0.0, index=origlabels, columns=origlabels)
adjSpecM2 = pd.DataFrame(0.0, index=origlabels, columns=origlabels)
adjCountM2 = pd.DataFrame(0, index=origlabels, columns=origlabels)
for idx in edgeDF.index:
if (edgeDF.loc[idx, 'Ligand detection rate in condition 1']>frequencyThreshold)&(edgeDF.loc[idx, 'Receptor detection rate in condition 1']>frequencyThreshold):
adjM1.loc[str(edgeDF.loc[idx,'Sending cluster']), str(edgeDF.loc[idx,'Target cluster'])] += edgeDF.loc[idx,'Edge expression weight in condition 1']
adjSpecM1.loc[str(edgeDF.loc[idx,'Sending cluster']), str(edgeDF.loc[idx,'Target cluster'])] += edgeDF.loc[idx,'Edge specificity weight in condition 1']
adjCountM1.loc[str(edgeDF.loc[idx,'Sending cluster']), str(edgeDF.loc[idx,'Target cluster'])] += 1
if (edgeDF.loc[idx, 'Ligand detection rate in condition 2']>frequencyThreshold)&(edgeDF.loc[idx, 'Receptor detection rate in condition 2']>frequencyThreshold):
adjM2.loc[str(edgeDF.loc[idx,'Sending cluster']), str(edgeDF.loc[idx,'Target cluster'])] += edgeDF.loc[idx,'Edge expression weight in condition 2']
adjSpecM2.loc[str(edgeDF.loc[idx,'Sending cluster']), str(edgeDF.loc[idx,'Target cluster'])] += edgeDF.loc[idx,'Edge specificity weight in condition 2']
adjCountM2.loc[str(edgeDF.loc[idx,'Sending cluster']), str(edgeDF.loc[idx,'Target cluster'])] += 1
# remove isolated nodes
adjM1.index = labels
adjM1.columns = labels
adjSpecM1.index = labels
adjSpecM1.columns = labels
adjCountM1.index = labels
adjCountM1.columns = labels
ilist1 = adjM1.index[adjM1.max(axis=1)>0]
clist1 = adjM1.columns[adjM1.max(axis=0)>0]
nlist1 = sorted(list(set(ilist1).union(set(clist1))))
adjM2.index = labels
adjM2.columns = labels
adjSpecM2.index = labels
adjSpecM2.columns = labels
adjCountM2.index = labels
adjCountM2.columns = labels
ilist2 = adjM2.index[adjM2.max(axis=1)>0]
clist2 = adjM2.columns[adjM2.max(axis=0)>0]
nlist2 = sorted(list(set(ilist2).union(set(clist2))))
nlist = sorted(list(set(nlist1).union(set(nlist2))))
adjM1 = adjM1.loc[nlist,nlist]
adjSpecM1 = adjSpecM1.loc[nlist,nlist]
adjCountM1 = adjCountM1.loc[nlist,nlist]
adjM2 = adjM2.loc[nlist,nlist]
adjSpecM2 = adjSpecM2.loc[nlist,nlist]
adjCountM2 = adjCountM2.loc[nlist,nlist]
    # convert the summed edge expression weights into per-edge averages
for idx in nlist:
for col in nlist:
if adjCountM1.loc[idx,col] != 0:
adjM1.loc[idx,col] = adjM1.loc[idx,col]/adjCountM1.loc[idx,col]
if adjCountM2.loc[idx,col] != 0:
adjM2.loc[idx,col] = adjM2.loc[idx,col]/adjCountM2.loc[idx,col]
adjMD = adjM2 - adjM1
adjSpecMD = adjSpecM2 - adjSpecM1
adjCountMD = adjCountM2 - adjCountM1
adjM1 = ChooseTopEdges(adjM1, keepTopEdge)
adjSpecM1 = ChooseTopEdges(adjSpecM1, keepTopEdge)
adjCountM1 = ChooseTopEdges(adjCountM1, keepTopEdge)
adjM2 = ChooseTopEdges(adjM2, keepTopEdge)
adjSpecM2 = ChooseTopEdges(adjSpecM2, keepTopEdge)
adjCountM2 = ChooseTopEdges(adjCountM2, keepTopEdge)
adjMD = ChooseTopEdges(adjMD, keepTopEdge)
adjSpecMD = ChooseTopEdges(adjSpecMD, keepTopEdge)
adjCountMD = ChooseTopEdges(adjCountMD, keepTopEdge)
ilist1 = adjM1.index[adjM1.max(axis=1)>0]
clist1 = adjM1.columns[adjM1.max(axis=0)>0]
nlist1 = sorted(list(set(ilist1).union(set(clist1))))
ilist2 = adjM2.index[adjM2.max(axis=1)>0]
clist2 = adjM2.columns[adjM2.max(axis=0)>0]
nlist2 = sorted(list(set(ilist2).union(set(clist2))))
ilist3 = adjMD.index[adjMD.max(axis=1)>0]
clist3 = adjMD.columns[adjMD.max(axis=0)>0]
nlist3 = sorted(list(set(ilist3).union(set(clist3))))
nlist = sorted(list(set(nlist1).union(set(nlist2)).union(set(nlist3))))
adjM1 = adjM1.loc[nlist,nlist]
adjM2 = adjM2.loc[nlist,nlist]
adjMD = adjMD.loc[nlist,nlist]
ilist1 = adjSpecM1.index[adjSpecM1.max(axis=1)>0]
clist1 = adjSpecM1.columns[adjSpecM1.max(axis=0)>0]
nlist1 = sorted(list(set(ilist1).union(set(clist1))))
ilist2 = adjSpecM2.index[adjSpecM2.max(axis=1)>0]
clist2 = adjSpecM2.columns[adjSpecM2.max(axis=0)>0]
nlist2 = sorted(list(set(ilist2).union(set(clist2))))
ilist3 = adjSpecMD.index[adjSpecMD.max(axis=1)>0]
clist3 = adjSpecMD.columns[adjSpecMD.max(axis=0)>0]
nlist3 = sorted(list(set(ilist3).union(set(clist3))))
nlist = sorted(list(set(nlist1).union(set(nlist2)).union(set(nlist3))))
adjSpecM1 = adjSpecM1.loc[nlist,nlist]
adjSpecM2 = adjSpecM2.loc[nlist,nlist]
adjSpecMD = adjSpecMD.loc[nlist,nlist]
ilist1 = adjCountM1.index[adjCountM1.max(axis=1)>0]
clist1 = adjCountM1.columns[adjCountM1.max(axis=0)>0]
nlist1 = sorted(list(set(ilist1).union(set(clist1))))
ilist2 = adjCountM2.index[adjCountM2.max(axis=1)>0]
clist2 = adjCountM2.columns[adjCountM2.max(axis=0)>0]
nlist2 = sorted(list(set(ilist2).union(set(clist2))))
ilist3 = adjCountMD.index[adjCountMD.max(axis=1)>0]
clist3 = adjCountMD.columns[adjCountMD.max(axis=0)>0]
nlist3 = sorted(list(set(ilist3).union(set(clist3))))
nlist = sorted(list(set(nlist1).union(set(nlist2)).union(set(nlist3))))
adjCountM1 = adjCountM1.loc[nlist,nlist]
adjCountM2 = adjCountM2.loc[nlist,nlist]
adjCountMD = adjCountMD.loc[nlist,nlist]
nxgW1 = nx.MultiDiGraph(adjM1)
nxgS1 = nx.MultiDiGraph(adjSpecM1)
nxgC1 = nx.MultiDiGraph(adjCountM1)
nxgW2 = nx.MultiDiGraph(adjM2)
nxgS2 = nx.MultiDiGraph(adjSpecM2)
nxgC2 = nx.MultiDiGraph(adjCountM2)
nxgWD = nx.MultiDiGraph(adjMD)
nxgSD = nx.MultiDiGraph(adjSpecMD)
nxgCD = nx.MultiDiGraph(adjCountMD)
return edgeDF, adjM1, adjSpecM1, adjCountM1, nxgW1, nxgS1, nxgC1, adjM2, adjSpecM2, adjCountM2, nxgW2, nxgS2, nxgC2, adjMD, adjSpecMD, adjCountMD, nxgWD, nxgSD, nxgCD
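# Sketch of the per-edge table layout BuildDeltaAdjM expects (hypothetical
# helper, never invoked by the pipeline): one row per ligand-receptor edge,
# with detection rates and expression/specificity weights reported separately
# for the two conditions being compared. The toy values are made up.
def _example_delta_edge_table():
    return pd.DataFrame({
        'Sending cluster': ['A'],
        'Target cluster': ['B'],
        'Ligand detection rate in condition 1': [0.4],
        'Receptor detection rate in condition 1': [0.3],
        'Edge expression weight in condition 1': [1.1],
        'Edge specificity weight in condition 1': [0.2],
        'Ligand detection rate in condition 2': [0.7],
        'Receptor detection rate in condition 2': [0.6],
        'Edge expression weight in condition 2': [2.3],
        'Edge specificity weight in condition 2': [0.5],
    })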
# build adjacency matrix
def BuildAdjM(edgeDF, origlabels, labels, specificityThreshold, weightThreshold, keepTopEdge):
# only keep edges of interest
if 'delta specificity' in edgeDF.columns:
edgeDF = edgeDF.loc[(edgeDF['delta specificity']>specificityThreshold)&(edgeDF['delta weight']>weightThreshold),]
else:
edgeDF = edgeDF.loc[(edgeDF['product of specified']>specificityThreshold)&(edgeDF['original ligand']>weightThreshold)&(edgeDF['original receptor']>weightThreshold),]
edgeDF['sending cluster name'] = edgeDF['sending cluster name'].astype(str)
edgeDF['target cluster name'] = edgeDF['target cluster name'].astype(str)
adjM = pd.DataFrame(0.0, index=origlabels, columns=origlabels)
adjSpecM = pd.DataFrame(0.0, index=origlabels, columns=origlabels)
adjCountM = pd.DataFrame(0, index=origlabels, columns=origlabels)
for idx in edgeDF.index:
if 'delta specificity' in edgeDF.columns:
adjM.loc[str(edgeDF.loc[idx,'sending cluster name']), str(edgeDF.loc[idx,'target cluster name'])] += edgeDF.loc[idx,'delta weight']
adjSpecM.loc[str(edgeDF.loc[idx,'sending cluster name']), str(edgeDF.loc[idx,'target cluster name'])] += edgeDF.loc[idx,'delta specificity']
else:
adjM.loc[str(edgeDF.loc[idx,'sending cluster name']), str(edgeDF.loc[idx,'target cluster name'])] += edgeDF.loc[idx,'product of original']
adjSpecM.loc[str(edgeDF.loc[idx,'sending cluster name']), str(edgeDF.loc[idx,'target cluster name'])] += edgeDF.loc[idx,'product of specified']
adjCountM.loc[str(edgeDF.loc[idx,'sending cluster name']), str(edgeDF.loc[idx,'target cluster name'])] += 1
adjM.index = labels
adjM.columns = labels
adjSpecM.index = labels
adjSpecM.columns = labels
adjCountM.index = labels
adjCountM.columns = labels
ilist = adjM.index[adjM.max(axis=1)>0]
clist = adjM.columns[adjM.max(axis=0)>0]
nlist = sorted(list(set(ilist).union(set(clist))))
adjM = adjM.loc[nlist,nlist]
adjSpecM = adjSpecM.loc[nlist,nlist]
adjCountM = adjCountM.loc[nlist,nlist]
adjM = ChooseTopEdges(adjM, keepTopEdge)
ilist = adjM.index[adjM.max(axis=1)>0]
clist = adjM.columns[adjM.max(axis=0)>0]
nlist = sorted(list(set(ilist).union(set(clist))))
adjM = adjM.loc[nlist,nlist]
adjSpecM = ChooseTopEdges(adjSpecM, keepTopEdge)
ilist = adjSpecM.index[adjSpecM.max(axis=1)>0]
clist = adjSpecM.columns[adjSpecM.max(axis=0)>0]
nlist = sorted(list(set(ilist).union(set(clist))))
adjSpecM = adjSpecM.loc[nlist,nlist]
adjCountM = ChooseTopEdges(adjCountM, keepTopEdge)
ilist = adjCountM.index[adjCountM.max(axis=1)>0]
clist = adjCountM.columns[adjCountM.max(axis=0)>0]
nlist = sorted(list(set(ilist).union(set(clist))))
adjCountM = adjCountM.loc[nlist,nlist]
nxgW = nx.MultiDiGraph(adjM)
nxgS = nx.MultiDiGraph(adjSpecM)
nxgC = nx.MultiDiGraph(adjCountM)
return edgeDF, adjM, adjSpecM, adjCountM, nxgW, nxgS, nxgC
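# Minimal usage sketch for the single-condition path of BuildAdjM (hypothetical
# helper, never invoked): the toy edge table only carries the columns the
# non-delta branch reads, and thresholds of 0 with keepTopEdge=0 keep all edges.
def _example_build_adjm():
    toy_edges = pd.DataFrame({
        'sending cluster name': ['A', 'A', 'B'],
        'target cluster name': ['B', 'C', 'C'],
        'original ligand': [1.2, 0.8, 2.0],
        'original receptor': [0.9, 1.5, 0.4],
        'product of original': [1.08, 1.2, 0.8],
        'product of specified': [0.6, 0.2, 0.9],
    })
    labels = ['A', 'B', 'C']
    return BuildAdjM(toy_edges, labels, labels, specificityThreshold=0,
                     weightThreshold=0, keepTopEdge=0)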
def IgraphFromAdjacency(adjM, layout, labels, cltSizes, clusterDistance):
# insert nodes and edges into the graph object
g = ig.Graph(directed=True)
g.add_vertices(adjM.shape[0])
nNameList = []
nWeightList = []
for nidx in adjM.index:
tidx = labels.index(nidx)
nNameList.append(labels[tidx])
nWeightList.append(cltSizes[tidx])
g.vs["name"] = nNameList
g.vs["weight"] = nWeightList
edgeList = []
edgeWeightList = []
for s in range(len(adjM)):
for t in range(len(adjM)):
if adjM.iloc[s,t] > 0:
edgeList.append((s,t))
edgeWeightList.append(adjM.iloc[s,t])
g.add_edges(edgeList)
g.es['weight'] = edgeWeightList
# set node positions based on the layout
if layout == 'circle' or layout == 'sphere':
pos_list = reversed(g.layout(layout).coords)
else:
        # fix the random seed so the graph layout is reproducible
np.random.seed(0)
init_coords = np.random.random((len(adjM), 2)).tolist()
try:
pos_list = g.layout(layout, seed=init_coords, weights='weight',).coords
except Exception:
            # fall back for errors (e.g. AttributeError on empty graphs) raised by the weighted layout call
try:
pos_list = g.layout(layout, seed=init_coords).coords
except Exception:
pos_list = g.layout(layout).coords
posDict = {n: [p[0]*clusterDistance, -p[1]*clusterDistance] for n, p in enumerate(pos_list)}
return posDict
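# Hypothetical usage sketch (not part of the pipeline): IgraphFromAdjacency
# returns a {node index: [x, y]} dictionary, scaled by clusterDistance, which
# the graphviz drawing code later uses to pin node positions. 'kk'
# (Kamada-Kawai) is one layout name understood by igraph's Graph.layout.
def _example_igraph_layout():
    labels = ['A', 'B', 'C']
    toy = pd.DataFrame([[0.0, 1.0, 0.5],
                        [0.0, 0.0, 2.0],
                        [1.5, 0.0, 0.0]],
                       index=labels, columns=labels)
    return IgraphFromAdjacency(toy, 'kk', labels, cltSizes=[10, 20, 30],
                               clusterDistance=1.0)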
def DrawDeltaHeatmap(readmeStr, typeStr, numStr, tempM1, tempM2, tempMD, tempMF, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat):
    # draw the per-condition heatmaps (condition 1 in red, condition 2 in blue)
newlbls = []
for idx in tempM1.index:
newlbls.append(idx.split('\n')[0])
oldidx = tempM1.index
tempM1.index = newlbls
tempM2.index = newlbls
tempMD.index = newlbls
tempMF.index = newlbls
newcts = []
for col in tempM1.columns:
newcts.append(col.split('\n')[0])
oldcol = tempM1.columns
tempM1.columns = newcts
tempM2.columns = newcts
tempMD.columns = newcts
tempMF.columns = newcts
import matplotlib.colors as colors
BuOnly = colors.LinearSegmentedColormap.from_list("", ["#FFFFFF","#0000FF"])
RdOnly = colors.LinearSegmentedColormap.from_list("", ["#FFFFFF","#FF0000"])
import matplotlib.pyplot as plt
figWidth = max(tempM1.shape)*fontSize/30
f, ax = plt.subplots(figsize=(figWidth, figWidth))
if numStr == 'int':
if tempM1.max().max() < 500:
            g = sns.heatmap(tempM1.astype(int), square=True, annot=True, fmt="d", linewidths=.5, ax=ax, cmap=RdOnly)
else:
g = sns.heatmap(tempM1, square=True, annot=False, linewidths=.5, ax=ax, cmap=RdOnly)
else:
g = sns.heatmap(tempM1, square=True, annot=False, linewidths=.5, ax=ax, cmap=RdOnly)
ax.set_xlabel('Cell-type expressing receptor (receiving)')
ax.set_ylabel('Cell-type expressing ligand (sending)')
fig = g.get_figure()
plotFileName = 'heatmap_cond1_%s-based.%s' % (typeStr, plotFormat)
readmeStr += 'heatmap_cond1_%s-based.%s: cluster-to-cluster communication heatmap in condition 1, the element is %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
fig.savefig(plotFileName, bbox_inches = "tight")
f, ax = plt.subplots(figsize=(figWidth, figWidth))
if numStr == 'int':
if tempM2.max().max() < 500:
            g = sns.heatmap(tempM2.astype(int), square=True, annot=True, fmt="d", linewidths=.5, ax=ax, cmap=BuOnly)
else:
g = sns.heatmap(tempM2, square=True, annot=False, linewidths=.5, ax=ax, cmap=BuOnly)
else:
g = sns.heatmap(tempM2, square=True, annot=False, linewidths=.5, ax=ax, cmap=BuOnly)
ax.set_xlabel('Cell-type expressing receptor (receiving)')
ax.set_ylabel('Cell-type expressing ligand (sending)')
fig = g.get_figure()
plotFileName = 'heatmap_cond2_%s-based.%s' % (typeStr, plotFormat)
readmeStr += 'heatmap_cond2_%s-based.%s: cluster-to-cluster communication heatmap in condition 2, the element is %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
fig.savefig(plotFileName, bbox_inches = "tight")
import numpy as np
ttempMF = tempMF.copy()
for ttidx in ttempMF.index:
for ttcol in ttempMF.columns:
if tempMD.loc[ttidx,ttcol] < 0:
ttempMF.loc[ttidx,ttcol] = np.log2(ttempMF.loc[ttidx,ttcol]) * -1.0
else:
ttempMF.loc[ttidx,ttcol] = np.log2(ttempMF.loc[ttidx,ttcol])
    # draw the heatmap of differences (condition 2 minus condition 1)
f, ax = plt.subplots(figsize=(figWidth, figWidth))
if numStr == 'int':
        # blue marks edges at least two-fold higher in condition 2 (signed log2 fold change >= 1)
g = sns.heatmap(tempMD.astype(float), mask=ttempMF < 1.0, cbar=False, cmap=matplotlib.colors.ListedColormap(['#0000FF']), square=True, annot=True, fmt=".0f", linewidths=.5, ax=ax)
        # red marks edges at least two-fold higher in condition 1 (signed log2 fold change <= -1)
g = sns.heatmap(tempMD.astype(float), mask=ttempMF > -1.0, cbar=False, cmap=matplotlib.colors.ListedColormap(['#FF0000']), square=True, annot=True, fmt=".0f", linewidths=.5, ax=ax)
        # yellow marks edges with less than a two-fold change
g = sns.heatmap(tempMD.astype(float), mask=(ttempMF > 1.0)|(ttempMF < -1.0), cbar=False, cmap=matplotlib.colors.ListedColormap(['#FFFF00']), square=True, annot=True, fmt=".0f", linewidths=.5, ax=ax)
else:
        # blue marks edges at least two-fold higher in condition 2 (signed log2 fold change >= 1)
g = sns.heatmap(tempMD, mask=ttempMF < 1.0, cbar=False, cmap=matplotlib.colors.ListedColormap(['#0000FF']), square=True, annot=True, fmt=".2f", linewidths=.5, ax=ax)
        # red marks edges at least two-fold higher in condition 1 (signed log2 fold change <= -1)
g = sns.heatmap(tempMD, mask=ttempMF > -1.0, cbar=False, cmap=matplotlib.colors.ListedColormap(['#FF0000']), square=True, annot=True, fmt=".2f", linewidths=.5, ax=ax)
        # yellow marks edges with less than a two-fold change
g = sns.heatmap(tempMD, mask=(ttempMF > 1.0)|(ttempMF < -1.0), cbar=False, cmap=matplotlib.colors.ListedColormap(['#FFFF00']), square=True, annot=True, fmt=".2f", linewidths=.5, ax=ax)
ax.set_xlabel('Cell-type expressing receptor (receiving)')
ax.set_ylabel('Cell-type expressing ligand (sending)')
fig = g.get_figure()
plotFileName = 'heatmap_delta_diff_%s-based.%s' % (typeStr, plotFormat)
readmeStr += 'heatmap_delta_diff_%s-based.%s: cluster-to-cluster communication heatmap in which the element is the difference of %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
fig.savefig(plotFileName, bbox_inches = "tight")
    # draw the heatmap of signed log2 fold changes
f, ax = plt.subplots(figsize=(figWidth, figWidth))
    # blue marks edges at least two-fold higher in condition 2 (signed log2 fold change >= 1)
g = sns.heatmap(ttempMF, mask=ttempMF < 1.0, cbar=False, cmap=matplotlib.colors.ListedColormap(['#0000FF']), square=True, annot=True, fmt=".2f", linewidths=.5, ax=ax)
    # red marks edges at least two-fold higher in condition 1 (signed log2 fold change <= -1)
g = sns.heatmap(ttempMF, mask=ttempMF > -1.0, cbar=False, cmap=matplotlib.colors.ListedColormap(['#FF0000']), square=True, annot=True, fmt=".2f", linewidths=.5, ax=ax)
    # yellow marks edges with less than a two-fold change
g = sns.heatmap(ttempMF, mask=(ttempMF > 1.0)|(ttempMF < -1.0), cbar=False, cmap=matplotlib.colors.ListedColormap(['#FFFF00']), square=True, annot=True, fmt=".2f", linewidths=.5, ax=ax)
ax.set_xlabel('Cell-type expressing receptor (receiving)')
ax.set_ylabel('Cell-type expressing ligand (sending)')
fig = g.get_figure()
plotFileName = 'heatmap_fold_change_%s-based.%s' % (typeStr, plotFormat)
readmeStr += 'heatmap_fold_change_%s-based.%s: cluster-to-cluster communication heatmap in which the element is the log2 transformed fold change of %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
fig.savefig(plotFileName, bbox_inches = "tight")
tempM1.columns = oldcol
tempM1.index = oldidx
tempM2.columns = oldcol
tempM2.index = oldidx
tempMD.columns = oldcol
tempMD.index = oldidx
tempMF.columns = oldcol
tempMF.index = oldidx
return readmeStr
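# Worked example of the signed log2 fold-change convention applied above
# (hypothetical helper, for documentation only): the fold change is stored as
# the ratio of the higher to the lower condition, and the sign of the
# difference gives the direction, so values >= 1 after log2 mean "at least
# two-fold higher in condition 2" (blue) and values <= -1 mean "at least
# two-fold higher in condition 1" (red). Assumes both inputs are positive.
def _example_signed_log2_fold_change(cond1, cond2):
    delta = cond2 - cond1
    fold = cond1 / cond2 if delta < 0 else cond2 / cond1
    return np.log2(fold) * (-1.0 if delta < 0 else 1.0)
# e.g. _example_signed_log2_fold_change(4.0, 1.0) -> -2.0 (red, down)
#      _example_signed_log2_fold_change(1.0, 4.0) ->  2.0 (blue, up)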
def DrawDeltaGraphvizPlot(readmeStr, typeStr, numStr, nxgS1, adjSpecM1, nxgS2, adjSpecM2, nxgSD, adjSpecMD, dataType, resultDir, plotWidth, plotHeight, fontSize, edgeWidth, colorDict, cltSizeDict, maxClusterSize, wposDict, cltFDdict, labels, specificityThreshold, weightThreshold, frequencyThreshold, interDB, weightType, layout, plotFormat):
# draw networks of both datasets
# convert to a graphviz graph
nxgS1 = nx.nx_agraph.to_agraph(nxgS1)
nxgS1.graph_attr.update(fontname = "Arial")
nxgS1.graph_attr.update(fontsize = fontSize)
nxgS1.graph_attr.update(margin = 0)
nxgS1.graph_attr.update(ratio="compress")
nxgS1.graph_attr.update(label='Network in condition 1\nEdge weight: ' + typeStr)
nxgS1.graph_attr.update(size = "%s,%s" % (plotWidth, plotHeight))
# set edge properties
maxVal = adjSpecM1.max().max()
for ed in nxgS1.edges():
sn = ed[0]
tn = ed[1]
ed.attr['color'] = '#FF0000'
if edgeWidth == 0:
ed.attr['fontsize'] = fontSize
ed.attr['fontname'] = "Arial"
if numStr == 'float':
ed.attr['label'] = '%.2f' % (adjSpecM1.loc[sn, tn])
elif numStr == 'int':
ed.attr['label'] = '%d' % (adjSpecM1.loc[sn, tn])
else:
if adjSpecM1.loc[sn, tn]*edgeWidth/maxVal < 1:
ed.attr['penwidth'] = 1
else:
ed.attr['penwidth'] = int(adjSpecM1.loc[sn, tn]*edgeWidth/maxVal)
# set node color
nxgS1.node_attr['style']='filled,setlinewidth(0)'
idx = 0
maxCltSize = max(cltSizeDict.values())
for lb in labels:
try:
nd=nxgS1.get_node(lb)
except:
continue
nd.attr['shape'] = 'circle'
if maxClusterSize != 0:
radis = cltSizeDict[nd]*maxClusterSize/maxCltSize
nd.attr['width'] = str(round(radis,2))
nd.attr['height'] = str(round(radis,2))
nd.attr['fixedsize'] = 'true'
#newcol = [i+0.5*(1-i) for i in matplotlib.colors.to_rgb(colorDict[nd])]
#nd.attr['fillcolor'] = matplotlib.colors.to_hex(newcol)
nd.attr['fillcolor'] = colorDict[nd]
nd.attr['pin'] = 'true'
nd.attr['pos'] = '%s,%s' % (wposDict[idx][0]*plotWidth/2,wposDict[idx][1]*plotHeight/2)
nd.attr['fontsize'] = fontSize
nd.attr['fontname'] = "Arial"
idx += 1
plotFileName = 'network_cond1_%s-based_layout_%s.%s' % (typeStr, layout, plotFormat)
readmeStr += 'network_cond1_%s-based_layout_xx.%s: cluster-to-cluster communication network in condition 1, the edge weight is %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
warnings.simplefilter("error")
while True:
try:
nxgS1.draw(plotFileName,prog='fdp')
break
except RuntimeWarning as rw:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
if len(errorNodeList)==0:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
nxgS1.draw(plotFileName,prog='neato')
break
for nd in nxgS1.nodes():
if str(nd) in errorNodeList:
nd.attr['xlabel'] = str(nd)
nd.attr['label'] = ''
continue
warnings.simplefilter("ignore")
nxgS2 = nx.nx_agraph.to_agraph(nxgS2)
nxgS2.graph_attr.update(fontname = "Arial")
nxgS2.graph_attr.update(fontsize = fontSize)
nxgS2.graph_attr.update(margin = 0)
nxgS2.graph_attr.update(ratio="compress")
nxgS2.graph_attr.update(label='Network in condition 2\nEdge weight: ' + typeStr)
nxgS2.graph_attr.update(size = "%s,%s" % (plotWidth, plotHeight))
# set edge properties
maxVal = adjSpecM2.max().max()
for ed in nxgS2.edges():
sn = ed[0]
tn = ed[1]
ed.attr['color'] = '#0000FF'
if edgeWidth == 0:
ed.attr['fontsize'] = fontSize
ed.attr['fontname'] = "Arial"
if numStr == 'float':
ed.attr['label'] = '%.2f' % (adjSpecM2.loc[sn, tn])
elif numStr == 'int':
ed.attr['label'] = '%d' % (adjSpecM2.loc[sn, tn])
else:
if adjSpecM2.loc[sn, tn]*edgeWidth/maxVal < 1:
ed.attr['penwidth'] = 1
else:
ed.attr['penwidth'] = int(adjSpecM2.loc[sn, tn]*edgeWidth/maxVal)
# set node color
nxgS2.node_attr['style']='filled,setlinewidth(0)'
idx = 0
maxCltSize = max(cltSizeDict.values())
for lb in labels:
try:
nd=nxgS2.get_node(lb)
except:
continue
nd.attr['shape'] = 'circle'
if maxClusterSize != 0:
radis = cltSizeDict[nd]*maxClusterSize/maxCltSize
nd.attr['width'] = str(round(radis,2))
nd.attr['height'] = str(round(radis,2))
nd.attr['fixedsize'] = 'true'
#newcol = [i+0.5*(1-i) for i in matplotlib.colors.to_rgb(colorDict[nd])]
#nd.attr['fillcolor'] = matplotlib.colors.to_hex(newcol)
nd.attr['fillcolor'] = colorDict[nd]
nd.attr['pin'] = 'true'
nd.attr['pos'] = '%s,%s' % (wposDict[idx][0]*plotWidth/2,wposDict[idx][1]*plotHeight/2)
nd.attr['fontsize'] = fontSize
nd.attr['fontname'] = "Arial"
idx += 1
plotFileName = 'network_cond2_%s-based_layout_%s.%s' % (typeStr, layout, plotFormat)
readmeStr += 'network_cond2_%s-based_layout_xx.%s: cluster-to-cluster communication network in condition 2, the edge weight is %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
warnings.simplefilter("error")
while True:
try:
nxgS2.draw(plotFileName,prog='fdp')
break
except RuntimeWarning as rw:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
if len(errorNodeList)==0:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
nxgS2.draw(plotFileName,prog='neato')
break
for nd in nxgS2.nodes():
if str(nd) in errorNodeList:
nd.attr['xlabel'] = str(nd)
nd.attr['label'] = ''
continue
warnings.simplefilter("ignore")
    # assign node colors from the cluster-size fold change between conditions (|fold change| > 2 gets a condition color, otherwise yellow)
maxCltSize = 0.0
for clt in cltFDdict.keys():
        if maxCltSize < abs(cltFDdict[clt]):
            maxCltSize = abs(cltFDdict[clt])
cltSizeColorDict = {}
for clt in cltFDdict.keys():
if cltFDdict[clt] > 2:
# xf = cltFDdict[clt] / maxCltSize
# cltSizeColorDict[clt] = (high0[0]+deltah[0]*xf,high0[1]+deltah[1]*xf,high0[2]+deltah[2]*xf)
cltSizeColorDict[clt] = (0,0,1)
elif cltFDdict[clt] < -2:
# xf = cltFDdict[clt] / maxCltSize * -1.0
# cltSizeColorDict[clt] = (low0[0]+deltal[0]*xf,low0[1]+deltal[1]*xf,low0[2]+deltal[2]*xf)
cltSizeColorDict[clt] = (1,0,0)
else:
cltSizeColorDict[clt] = (1, 1, 0)
# draw delta networks
# convert to a graphviz graph
nxgSD = nx.nx_agraph.to_agraph(nxgSD)
nxgSD.graph_attr.update(fontname = "Arial")
nxgSD.graph_attr.update(fontsize = fontSize)
nxgSD.graph_attr.update(margin = 0)
nxgSD.graph_attr.update(ratio="compress")
nxgSD.graph_attr.update(label='Delta Network\nEdge weight: delta ' + typeStr)
nxgSD.graph_attr.update(size = "%s,%s" % (plotWidth, plotHeight))
# set edge properties
maxValP = adjSpecMD.max().max()
maxValN = adjSpecMD.min().min()
if maxValN >= 0:
maxValN = maxValP
for ed in nxgSD.edges():
sn = ed[0]
tn = ed[1]
if adjSpecMD.loc[sn, tn] < 0 and adjSpecM2.loc[sn, tn] > 0 and float(adjSpecM1.loc[sn, tn])/adjSpecM2.loc[sn, tn] > 2:
ed.attr['color'] = '#FF0000'
elif adjSpecMD.loc[sn, tn] > 0 and adjSpecM1.loc[sn, tn] > 0 and float(adjSpecM2.loc[sn, tn])/adjSpecM1.loc[sn, tn] > 2:
ed.attr['color'] = '#0000FF'
else:
ed.attr['color'] = '#FFFF00'
if adjSpecMD.loc[sn, tn] > 0 and ed.attr['color'] != '#FFFF00':#to red
#sclc = float(adjSpecMD.loc[sn, tn])/maxValP*1.0
#ed.attr['color'] = matplotlib.colors.to_hex([1.0, 1.0-sclc, 0.0])
#ed.attr['color'] = matplotlib.colors.to_hex((low0[0]+deltal[0]*sclc,low0[1]+deltal[1]*sclc,low0[2]+deltal[2]*sclc))
pass
elif adjSpecMD.loc[sn, tn] < 0 and ed.attr['color'] != '#FFFF00':
#sclc = float(adjSpecMD.loc[sn, tn])/maxValN*1.0
#ed.attr['color'] = matplotlib.colors.to_hex([1.0-sclc, 1.0, 0.0])
#ed.attr['color'] = matplotlib.colors.to_hex((high0[0]+deltah[0]*sclc,high0[1]+deltah[1]*sclc,high0[2]+deltah[2]*sclc))
pass
if edgeWidth == 0:
ed.attr['fontsize'] = fontSize
ed.attr['fontname'] = "Arial"
if numStr == 'float':
ed.attr['label'] = '%.2f' % (adjSpecMD.loc[sn, tn])
elif numStr == 'int':
ed.attr['label'] = '%d' % (adjSpecMD.loc[sn, tn])
else:
if adjSpecMD.loc[sn, tn] > 0:
if adjSpecMD.loc[sn, tn]*edgeWidth/maxValP < 1:
ed.attr['penwidth'] = 1
else:
ed.attr['penwidth'] = int(float(adjSpecMD.loc[sn, tn])*edgeWidth/maxValP)
else:
if adjSpecMD.loc[sn, tn]*edgeWidth/maxValN < 1:
ed.attr['penwidth'] = 1
else:
ed.attr['penwidth'] = int(float(adjSpecMD.loc[sn, tn])*edgeWidth/maxValN)
# set node color
nxgSD.node_attr['style']='filled,setlinewidth(0)'
idx = 0
maxCltSize = max(cltSizeDict.values())
for lb in labels:
try:
nd=nxgSD.get_node(lb)
except:
continue
nd.attr['shape'] = 'circle'
if maxClusterSize != 0:
radis = cltSizeDict[nd]*maxClusterSize/maxCltSize
nd.attr['width'] = str(round(radis,2))
nd.attr['height'] = str(round(radis,2))
nd.attr['fixedsize'] = 'true'
nd.attr['fillcolor'] = matplotlib.colors.to_hex(cltSizeColorDict[str(nd).split('\n')[0]])
nd.attr['pin'] = 'true'
nd.attr['pos'] = '%s,%s' % (wposDict[idx][0]*plotWidth/2,wposDict[idx][1]*plotHeight/2)
nd.attr['fontsize'] = fontSize
nd.attr['fontname'] = "Arial"
idx += 1
plotFileName = 'network_delta_diff_%s-based_layout_%s.%s' % (typeStr, layout, plotFormat)
readmeStr += 'network_delta_diff_%s-based_layout_xx.%s: cluster-to-cluster communication network in which the edge weight is the difference of %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
warnings.simplefilter("error")
while True:
try:
nxgSD.draw(plotFileName,prog='fdp')
break
except RuntimeWarning as rw:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
if len(errorNodeList)==0:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
nxgSD.draw(plotFileName,prog='neato')
break
for nd in nxgSD.nodes():
if str(nd) in errorNodeList:
nd.attr['xlabel'] = str(nd)
nd.attr['label'] = ''
continue
warnings.simplefilter("ignore")
# draw fold change networks
# calculate fold changes
adjSpecM1 = adjSpecM1.astype(float)
adjSpecM2 = adjSpecM2.astype(float)
adjSpecMF = pd.DataFrame(0.0, index=adjSpecMD.index, columns=adjSpecMD.columns)
for idx in adjSpecMD.index:
for col in adjSpecMD.columns:
if adjSpecMD.loc[idx, col] < 0:
if adjSpecM2.loc[idx, col] > 0:
adjSpecMF.loc[idx, col] = adjSpecM1.loc[idx, col]/adjSpecM2.loc[idx, col]
else:
if adjSpecM1.loc[idx, col] > 0:
adjSpecMF.loc[idx, col] = adjSpecM2.loc[idx, col]/adjSpecM1.loc[idx, col]
maxVal = adjSpecMF.max().max()
#set inf as 2 x max fold change
for idx in adjSpecMD.index:
for col in adjSpecMD.columns:
if adjSpecMD.loc[idx, col] < 0:
if adjSpecM2.loc[idx, col] == 0:
adjSpecMF.loc[idx, col] = maxVal * 2
else:
if adjSpecM1.loc[idx, col] == 0:
adjSpecMF.loc[idx, col] = maxVal * 2
maxVal = adjSpecMF.max().max()
# convert to a graphviz graph
nxgSF = nx.MultiDiGraph(adjSpecMF)
nxgSF = nx.nx_agraph.to_agraph(nxgSF)
nxgSF.graph_attr.update(fontname = "Arial")
nxgSF.graph_attr.update(fontsize = fontSize)
nxgSF.graph_attr.update(margin = 0)
    nxgSF.graph_attr.update(ratio="compress")
nxgSF.graph_attr.update(label='Delta Network\nEdge weight: fold change of ' + typeStr)
nxgSF.graph_attr.update(size = "%s,%s" % (plotWidth, plotHeight))
# set edge properties
for ed in nxgSF.edges():
sn = ed[0]
tn = ed[1]
if adjSpecMD.loc[sn, tn] < 0 and adjSpecM2.loc[sn, tn] > 0 and adjSpecM1.loc[sn, tn]/adjSpecM2.loc[sn, tn] > 2:
ed.attr['color'] = '#FF0000'
elif adjSpecMD.loc[sn, tn] > 0 and adjSpecM1.loc[sn, tn] > 0 and adjSpecM2.loc[sn, tn]/adjSpecM1.loc[sn, tn] > 2:
ed.attr['color'] = '#0000FF'
else:
ed.attr['color'] = '#FFFF00'
#sclc = float(adjSpecMF.loc[sn, tn])/maxVal*1.0
if adjSpecMD.loc[sn, tn] > 0 and ed.attr['color'] != '#FFFF00':#to red
#ed.attr['color'] = matplotlib.colors.to_hex([1.0, 1.0-sclc, 0.0])
#ed.attr['color'] = matplotlib.colors.to_hex((low0[0]+deltal[0]*sclc,low0[1]+deltal[1]*sclc,low0[2]+deltal[2]*sclc))
pass
elif adjSpecMD.loc[sn, tn] < 0 and ed.attr['color'] != '#FFFF00':
#ed.attr['color'] = matplotlib.colors.to_hex([1.0-sclc, 1.0, 0.0])
#ed.attr['color'] = matplotlib.colors.to_hex((high0[0]+deltah[0]*sclc,high0[1]+deltah[1]*sclc,high0[2]+deltah[2]*sclc))
pass
if edgeWidth == 0:
ed.attr['fontsize'] = fontSize
ed.attr['fontname'] = "Arial"
if adjSpecMD.loc[sn, tn] > 0:
ed.attr['label'] = '%.2f' % (adjSpecMF.loc[sn, tn])
else:
ed.attr['label'] = '-%.2f' % (adjSpecMF.loc[sn, tn])
else:
if adjSpecMF.loc[sn, tn]*edgeWidth/maxVal < 1:
ed.attr['penwidth'] = 1
else:
ed.attr['penwidth'] = int(adjSpecMF.loc[sn, tn]*edgeWidth/maxVal)
# set node color
nxgSF.node_attr['style']='filled,setlinewidth(0)'
idx = 0
maxCltSize = max(cltSizeDict.values())
for lb in labels:
try:
nd=nxgSF.get_node(lb)
except:
continue
nd.attr['shape'] = 'circle'
if maxClusterSize != 0:
radis = cltSizeDict[nd]*maxClusterSize/maxCltSize
nd.attr['width'] = str(round(radis,2))
nd.attr['height'] = str(round(radis,2))
nd.attr['fixedsize'] = 'true'
nd.attr['fillcolor'] = matplotlib.colors.to_hex(cltSizeColorDict[str(nd).split('\n')[0]])
nd.attr['pin'] = 'true'
nd.attr['pos'] = '%s,%s' % (wposDict[idx][0]*plotWidth/2,wposDict[idx][1]*plotHeight/2)
nd.attr['fontsize'] = fontSize
nd.attr['fontname'] = "Arial"
idx += 1
plotFileName = 'network_fold_change_%s-based_layout_%s.%s' % (typeStr, layout, plotFormat)
readmeStr += 'network_fold_change_%s-based_layout_xx.%s: cluster-to-cluster communication network in which the edge weight is the fold change of %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
warnings.simplefilter("error")
while True:
try:
nxgSF.draw(plotFileName,prog='fdp')
break
except RuntimeWarning as rw:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
if len(errorNodeList)==0:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
nxgSF.draw(plotFileName,prog='neato')
break
for nd in nxgSF.nodes():
if str(nd) in errorNodeList:
nd.attr['xlabel'] = str(nd)
nd.attr['label'] = ''
continue
warnings.simplefilter("ignore")
for idx in adjSpecMD.index:
for col in adjSpecMD.columns:
if adjSpecMD.loc[idx, col] < 0:
if adjSpecM2.loc[idx, col] == 0:
adjSpecMF.loc[idx, col] = adjSpecM1.loc[idx, col] / adjSpecM2.loc[idx, col] * -1
else:
if adjSpecM1.loc[idx, col] == 0:
adjSpecMF.loc[idx, col] = adjSpecM2.loc[idx, col] / adjSpecM1.loc[idx, col]
return readmeStr, adjSpecMF
def DrawChord(readmeStr, typeStr, numStr, tempM, colors, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold, weightThreshold, frequencyThreshold, interDB, weightType, plotFormat):
import sys
if 2 == sys.version_info[0]:
return readmeStr
from matplotlib.colors import LinearSegmentedColormap
cmap = LinearSegmentedColormap.from_list('Custom', colors, len(colors))
newlbls = []
for idx in tempM.index:
newlbls.append(idx.split('\n')[0])
oldidx = tempM.index
tempM.index = newlbls
newcts = []
for col in tempM.columns:
newcts.append(col.split('\n')[0])
oldcol = tempM.columns
tempM.columns = newcts
import holoviews as hv
from holoviews import dim, opts
hv.extension('matplotlib')
nodeDF = pd.DataFrame({'name':list(newcts)}).reset_index()
nodes = hv.Dataset(nodeDF, 'index')
linkDict = {'source':[],'target':[],'value':[]}
for idx in tempM.index:
sid = int(nodeDF.loc[nodeDF['name']==idx,'index'])
for col in tempM.columns:
if tempM.loc[idx,col] > 0:
tid = int(nodeDF.loc[nodeDF['name']==col,'index'])
linkDict['source'].append(sid)
linkDict['target'].append(tid)
linkDict['value'].append(int(tempM.loc[idx,col]*1000))
links = pd.DataFrame(linkDict)
chd = hv.Chord((links, nodes))
chd = chd.opts(opts.Chord(cmap=cmap, node_color=dim('index').astype(str), edge_color=dim('source').astype(str), labels='name', fontscale=200))
chd = chd.opts(fontsize={'legend': fontSize*4, 'labels': fontSize*4, 'legend_title': fontSize*4}, fig_size=plotWidth*plotHeight*5)
if dataType == '':
plotFileName = 'chord_%s-based.%s' % (typeStr, plotFormat)
readmeStr += 'chord_%s-based.%s: the cluster-to-cluster communication chord diagram in which the edge weight is %s.\n' % (typeStr, plotFormat, typeStr)
else:
plotFileName = '%s_chord_%s-based.%s' % (dataType, typeStr, plotFormat)
readmeStr += 'xxx_chord_%s-based.%s: dynamic cluster-to-cluster communication chord diagram in which the edge weight is %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
hv.save(chd, plotFileName)
tempM.columns = oldcol
tempM.index = oldidx
return readmeStr
def DrawHeatmap(readmeStr, typeStr, numStr, tempM, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold, weightThreshold, frequencyThreshold, interDB, weightType, plotFormat):
newlbls = []
for idx in tempM.index:
newlbls.append(idx.split('\n')[0])
oldidx = tempM.index
tempM.index = newlbls
newcts = []
for col in tempM.columns:
newcts.append(col.split('\n')[0])
oldcol = tempM.columns
tempM.columns = newcts
cmap = 'afmhot_r'
import matplotlib.pyplot as plt
f, ax = plt.subplots(figsize=(max(tempM.shape), max(tempM.shape)))
if numStr == 'int':
if tempM.max().max() < 100:
g = sns.heatmap(tempM.astype(int), square=True, annot=True, fmt="d", linewidths=.5, ax=ax, cmap=cmap)
else:
g = sns.heatmap(tempM.astype(int), square=True, annot=False, linewidths=.5, ax=ax, cmap=cmap)
else:
g = sns.heatmap(tempM, square=True, annot=False, linewidths=.5, ax=ax, cmap=cmap)
ax.set_xlabel('Cell-type expressing receptor (receiving)')
ax.set_ylabel('Cell-type expressing ligand (sending)')
fig = g.get_figure()
if dataType == '':
plotFileName = 'heatmap_%s-based.%s' % (typeStr, plotFormat)
readmeStr += 'heatmap_%s-based.%s: the cluster-to-cluster communication heatmap in which the element is %s.\n' % (typeStr, plotFormat, typeStr)
else:
plotFileName = '%s_heatmap_%s-based.%s' % (dataType, typeStr, plotFormat)
readmeStr += 'xxx_heatmap_%s-based.%s: dynamic cluster-to-cluster communication heatmap in which the element is %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
fig.savefig(plotFileName, bbox_inches = "tight")
tempM.columns = oldcol
tempM.index = oldidx
return readmeStr
def DrawGraphvizPlot(readmeStr, typeStr, numStr, nxgS, adjSpecM, dataType, resultDir, plotWidth, plotHeight, fontSize, edgeWidth, colorDict, cltSizeDict, maxClusterSize, wposDict, labels, specificityThreshold, weightThreshold, frequencyThreshold, interDB, weightType, layout, plotFormat):
# convert to a graphviz graph
nxgS = nx.nx_agraph.to_agraph(nxgS)
# draw whole network
nxgS.graph_attr.update(fontname = "Arial")
nxgS.graph_attr.update(fontsize = fontSize)
nxgS.graph_attr.update(margin = 0)
nxgS.graph_attr.update(ratio="fill")
nxgS.graph_attr.update(label='Edge weight: ' + typeStr)
nxgS.graph_attr.update(size = "%s,%s" % (plotWidth, plotHeight))
# set edge properties
maxVal = adjSpecM.max().max()
for ed in nxgS.edges():
sn = ed[0]
tn = ed[1]
#newcol = [i+0.5*(1-i) for i in matplotlib.colors.to_rgb(colorDict[sn])]
#ed.attr['color'] = matplotlib.colors.to_hex(newcol)
ed.attr['color'] = colorDict[sn]
if edgeWidth == 0:
ed.attr['fontsize'] = fontSize
ed.attr['fontname'] = "Arial"
if numStr == 'float':
ed.attr['label'] = '%.2f' % (adjSpecM.loc[sn, tn])
elif numStr == 'int':
ed.attr['label'] = '%d' % (adjSpecM.loc[sn, tn])
else:
if adjSpecM.loc[sn, tn]*edgeWidth/maxVal < 1:
ed.attr['penwidth'] = 1
else:
ed.attr['penwidth'] = int(adjSpecM.loc[sn, tn]*edgeWidth/maxVal)
# set node color
nxgS.node_attr['style']='filled,setlinewidth(0)'
idx = 0
maxCltSize = max(cltSizeDict.values())
for lb in adjSpecM.index:
try:
nd=nxgS.get_node(lb)
except:
continue
nd.attr['shape'] = 'circle'
if maxClusterSize != 0:
radis = cltSizeDict[nd]*maxClusterSize/maxCltSize
nd.attr['width'] = str(round(radis,2))
nd.attr['height'] = str(round(radis,2))
nd.attr['fixedsize'] = 'true'
#newcol = [i+0.5*(1-i) for i in matplotlib.colors.to_rgb(colorDict[nd])]
#nd.attr['fillcolor'] = matplotlib.colors.to_hex(newcol)
nd.attr['fillcolor'] = colorDict[nd]
nd.attr['pin'] = 'true'
nd.attr['pos'] = '%s,%s' % (wposDict[idx][0]*plotWidth/2,wposDict[idx][1]*plotHeight/2)
nd.attr['fontsize'] = fontSize
nd.attr['fontname'] = "Arial"
idx += 1
if dataType == '':
plotFileName = 'network_%s-based_layout_%s.%s' % (typeStr, layout, plotFormat)
readmeStr += 'network_%s-based_layout_xx.%s: the cluster-to-cluster communication network in which the edge weight is %s.\n' % (typeStr, plotFormat, typeStr)
else:
plotFileName = '%s_network_%s-based_layout_%s.%s' % (dataType, typeStr, layout, plotFormat)
readmeStr += 'xxx_network_%s-based_layout_xx.%s: dynamic cluster-to-cluster communication network in which the edge weight is %s.\n' % (typeStr, plotFormat, typeStr)
plotFileName = os.path.join(resultDir, plotFileName)
warnings.simplefilter("error")
while True:
try:
nxgS.draw(plotFileName,prog='fdp')
break
except RuntimeWarning as rw:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
if len(errorNodeList)==0:
errorNodeList = [m[m.find("'")+1:m.find("',")] for m in str(rw).split("Warning:") if 'too small' in m]
nxgS.draw(plotFileName,prog='neato')
break
for nd in nxgS.nodes():
if str(nd) in errorNodeList:
nd.attr['xlabel'] = str(nd)
nd.attr['label'] = ''
continue
warnings.simplefilter("ignore")
return readmeStr
def BuildDeltaInterClusterNetwork(origlabels, labels, cltSizes, ccolorList, edgeDF, specificityThreshold, weightThreshold, frequencyThreshold, keepTopEdge, interDB, weightType, layout, plotFormat, plotWidth, plotHeight, fontSize, edgeWidth, maxClusterSize, clusterDistance, resultDir, dataType=''):
readmeStr = '\n'
compT = pd.read_excel(os.path.join(resultDir,'..','cluster_comparison.xlsx'), index_col=None,header=0)
compT = compT.sort_values(by=['Cluster','Source'])
compT = compT.set_index('Cluster')
cltFDdict = {}
maxFC = 0
inflist = []
for idx in range(0,len(compT),2):
if compT.iloc[idx,1] * compT.iloc[idx+1,1] > 0:
if compT.iloc[idx,1] > compT.iloc[idx+1,1]:
tempFC = compT.iloc[idx,1]/compT.iloc[idx+1,1]
cltFDdict[compT.index[idx]] = -1.0*tempFC
if compT.iloc[idx,1] < compT.iloc[idx+1,1]:
tempFC = compT.iloc[idx+1,1]/compT.iloc[idx,1]
cltFDdict[compT.index[idx]] = 1.0*tempFC
if maxFC < tempFC:
maxFC = tempFC
elif compT.iloc[idx,1] * compT.iloc[idx+1,1] == 0:
inflist.append(idx)
#set inf as 2 x max fold change
maxFC = maxFC *2
for idx in inflist:
if compT.iloc[idx,1] != 0:
cltFDdict[compT.index[idx]] = -1.0 * maxFC
else:
cltFDdict[compT.index[idx]] = 1.0 * maxFC
# color scheme
colors = sns.color_palette(ccolorList,len(labels)).as_hex()
colorDict = {}
cltSizeDict = {}
for idx in range(len(colors)):
colorDict[labels[idx]] = colors[idx]
colorDict[origlabels[idx]] = colors[idx]
cltSizeDict[labels[idx]] = cltSizes[idx]
cltSizeDict[origlabels[idx]] = cltSizes[idx]
# build adjacency matrix
edgeDF, adjM1, adjSpecM1, adjCountM1, nxgW1, nxgS1, nxgC1, adjM2, adjSpecM2, adjCountM2, nxgW2, nxgS2, nxgC2, adjMD, adjSpecMD, adjCountMD, nxgWD, nxgSD, nxgCD = BuildDeltaAdjM(edgeDF, origlabels, labels, specificityThreshold, weightThreshold, frequencyThreshold,keepTopEdge)
# get igraph layout from adjacency matrix
wposDict = IgraphFromAdjacency(adjMD, layout, labels, cltSizes, clusterDistance)
wposDictSpec = IgraphFromAdjacency(adjSpecMD, layout, labels, cltSizes, clusterDistance)
wposDictCount = IgraphFromAdjacency(adjCountMD, layout, labels, cltSizes, clusterDistance)
readmeStr += '\n\nThe edge color is the same as the sending cluster.\n'
## draw use graphviz
#============edge count
readmeStr, adjCountMF = DrawDeltaGraphvizPlot(readmeStr, 'edge-count', 'int', nxgC1, adjCountM1, nxgC2, adjCountM2, nxgCD, adjCountMD, dataType, resultDir, plotWidth, plotHeight, fontSize, edgeWidth, colorDict, cltSizeDict, maxClusterSize, wposDictCount, cltFDdict, labels, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,layout,plotFormat)
#============average weight
readmeStr, adjMF = DrawDeltaGraphvizPlot(readmeStr, 'average-expression', 'float', nxgW1, adjM1, nxgW2, adjM2, nxgWD, adjMD, dataType, resultDir, plotWidth, plotHeight, fontSize, edgeWidth, colorDict, cltSizeDict, maxClusterSize, wposDict, cltFDdict, labels, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,layout,plotFormat)
#============total specificity
readmeStr, adjSpecMF = DrawDeltaGraphvizPlot(readmeStr, 'total-specificity', 'float', nxgS1, adjSpecM1, nxgS2, adjSpecM2, nxgSD, adjSpecMD, dataType, resultDir, plotWidth, plotHeight, fontSize, edgeWidth, colorDict, cltSizeDict, maxClusterSize, wposDictSpec, cltFDdict, labels, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,layout,plotFormat)
## draw heatmap
readmeStr = DrawDeltaHeatmap(readmeStr, 'edge-count', 'int', adjCountM1, adjCountM2, adjCountMD, adjCountMF, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat)
readmeStr = DrawDeltaHeatmap(readmeStr, 'total-expression', 'float', adjM1, adjM2, adjMD, adjMF, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat)
readmeStr = DrawDeltaHeatmap(readmeStr, 'total-specificity', 'float', adjSpecM1, adjSpecM2, adjSpecMD, adjSpecMF, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat)
for idx in adjMF.index:
for col in adjMF.columns:
if adjMD.loc[idx,col] < 0:
adjMF.loc[idx,col] = adjMF.loc[idx,col] * -1
for idx in adjCountMF.index:
for col in adjCountMF.columns:
if adjCountMD.loc[idx,col] < 0:
adjCountMF.loc[idx,col] = adjCountMF.loc[idx,col] * -1
for idx in adjSpecMF.index:
for col in adjSpecMF.columns:
if adjSpecMD.loc[idx,col] < 0:
adjSpecMF.loc[idx,col] = adjSpecMF.loc[idx,col] * -1
adjMFileName = os.path.join(resultDir, '%s_Mtx.xlsx' % dataType)
readmeStr += 'xxx_Mtx.xlsx: all adjacency matrices of the dynamic networks.\n'
writer = pd.ExcelWriter(adjMFileName, engine='xlsxwriter')
readmeDict = {'colA':['README'],'colB':['']}
readmeDict['colA'].append('Matrix 1')
readmeDict['colB'].append('Edge-count-based adjacency matrix in condition 1')
readmeDict['colA'].append('Matrix 2')
readmeDict['colB'].append('Edge-count-based adjacency matrix in condition 2')
readmeDict['colA'].append('Matrix 3')
readmeDict['colB'].append('Adjacency matrix in which each element is the difference between Matrix 1 and Matrix 2')
readmeDict['colA'].append('Matrix 4')
readmeDict['colB'].append('Adjacency matrix in which each element is the fold change between Matrix 1 and Matrix 2')
readmeDict['colA'].append('Matrix 5')
readmeDict['colB'].append('Total-specificity-based adjacency matrix in condition 1')
readmeDict['colA'].append('Matrix 6')
readmeDict['colB'].append('Total-specificity-based adjacency matrix in condition 2')
readmeDict['colA'].append('Matrix 7')
readmeDict['colB'].append('Adjacency matrix in which each element is the difference between Matrix 5 and Matrix 6')
readmeDict['colA'].append('Matrix 8')
readmeDict['colB'].append('Adjacency matrix in which each element is the fold change between Matrix 5 and Matrix 6')
readmeDict['colA'].append('Matrix 9')
readmeDict['colB'].append('Average-expression-based adjacency matrix in condition 1')
readmeDict['colA'].append('Matrix 10')
readmeDict['colB'].append('Average-expression-based adjacency matrix in condition 2')
readmeDict['colA'].append('Matrix 11')
readmeDict['colB'].append('Adjacency matrix in which each element is the difference between Matrix 9 and Matrix 10')
readmeDict['colA'].append('Matrix 12')
readmeDict['colB'].append('Adjacency matrix in which each element is the fold change between Matrix 9 and Matrix 10')
readmeDF = pd.DataFrame(readmeDict)
readmeDF.to_excel(writer, sheet_name='README', index=False, header=False)
adjCountM1.to_excel(writer, sheet_name='Matrix 1')
adjCountM2.to_excel(writer, sheet_name='Matrix 2')
adjCountMD.to_excel(writer, sheet_name='Matrix 3')
adjCountMF.to_excel(writer, sheet_name='Matrix 4')
adjSpecM1.to_excel(writer, sheet_name='Matrix 5')
adjSpecM2.to_excel(writer, sheet_name='Matrix 6')
adjSpecMD.to_excel(writer, sheet_name='Matrix 7')
adjSpecMF.to_excel(writer, sheet_name='Matrix 8')
adjM1.to_excel(writer, sheet_name='Matrix 9')
adjM2.to_excel(writer, sheet_name='Matrix 10')
adjMD.to_excel(writer, sheet_name='Matrix 11')
adjMF.to_excel(writer, sheet_name='Matrix 12')
writer.save()
with open(os.path.join(resultDir,'README.txt'), 'w') as file_object:
file_object.write('README\n')
file_object.write('\n')
file_object.write('The cell-to-cell signaling database: %s\n' % interDB)
file_object.write('The weight type of cell-to-cell signaling: %s\n' % weightType)
if keepTopEdge != 0:
file_object.write('Top edges to draw: %s\n' % keepTopEdge)
else:
file_object.write('Top edges to draw: all\n')
file_object.write('\n')
file_object.write('Expression threshold: %s\n' % weightThreshold)
file_object.write('Specificity threshold: %s\n' % specificityThreshold)
file_object.write('Detection threshold: %s\n' % frequencyThreshold)
file_object.write('\n')
file_object.write(readmeStr)
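# Illustrative sketch (not part of the original pipeline) of the per-cluster fold-change
# logic above: clusters present in both conditions get a signed fold change, and clusters
# with zero counts in one condition ("infinite" fold change) are capped at twice the
# largest finite fold change. The toy table, its column names and values are invented.
def _fold_change_sketch():
    import pandas as pd
    toy = pd.DataFrame({
        'Cluster': ['A', 'A', 'B', 'B', 'C', 'C'],
        'Source': ['cond1', 'cond2', 'cond1', 'cond2', 'cond1', 'cond2'],
        'Count': [10.0, 40.0, 30.0, 10.0, 0.0, 5.0],
    }).set_index('Cluster')
    fc = {}
    maxFC = 0.0
    inf_rows = []
    for i in range(0, len(toy), 2):
        c1, c2 = toy.iloc[i, 1], toy.iloc[i + 1, 1]
        if c1 * c2 > 0:
            tempFC = max(c1, c2) / min(c1, c2)
            fc[toy.index[i]] = -tempFC if c1 > c2 else tempFC
            maxFC = max(maxFC, tempFC)
        elif c1 * c2 == 0:
            inf_rows.append(i)
    maxFC = maxFC * 2
    for i in inf_rows:
        fc[toy.index[i]] = -maxFC if toy.iloc[i, 1] != 0 else maxFC
    return fc  # {'A': 4.0, 'B': -3.0, 'C': 8.0}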
def BuildInterClusterNetwork(origlabels, labels, cltSizes, ccolorList, edgeDF, specificityThreshold, weightThreshold, frequencyThreshold, keepTopEdge, interDB, weightType, layout, plotFormat, plotWidth, plotHeight, fontSize, edgeWidth, maxClusterSize, clusterDistance, resultDir, dataType=''):
readmeStr = '\n'
# color scheme
colors = sns.color_palette(ccolorList,len(labels)).as_hex()
colorDict = {}
cltSizeDict = {}
for idx in range(len(colors)):
colorDict[labels[idx]] = colors[idx]
colorDict[origlabels[idx]] = colors[idx]
cltSizeDict[labels[idx]] = cltSizes[idx]
cltSizeDict[origlabels[idx]] = cltSizes[idx]
# build adjacency matrix
edgeDF, adjM, adjSpecM, adjCountM, nxgW, nxgS, nxgC = BuildAdjM(edgeDF, origlabels, labels, specificityThreshold, weightThreshold, keepTopEdge)
# get igraph layout from adjacency matrix
wposDict = IgraphFromAdjacency(adjM, layout, labels, cltSizes, clusterDistance)
wposDictSpec = IgraphFromAdjacency(adjSpecM, layout, labels, cltSizes, clusterDistance)
wposDictCount = IgraphFromAdjacency(adjCountM, layout, labels, cltSizes, clusterDistance)
readmeStr += '\n\nThe edge color is the same as the sending cluster.\n'
## draw using graphviz
#============edge count
readmeStr = DrawGraphvizPlot(readmeStr, 'edge-count', 'int', nxgC, adjCountM, dataType, resultDir, plotWidth, plotHeight, fontSize, edgeWidth, colorDict, cltSizeDict, maxClusterSize, wposDictCount, labels, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,layout,plotFormat)
#============total weight
readmeStr = DrawGraphvizPlot(readmeStr, 'total-expression', 'float', nxgW, adjM, dataType, resultDir, plotWidth, plotHeight, fontSize, edgeWidth, colorDict, cltSizeDict, maxClusterSize, wposDict, labels, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,layout,plotFormat)
#============total specificity
readmeStr = DrawGraphvizPlot(readmeStr, 'total-specificity', 'float', nxgS, adjSpecM, dataType, resultDir, plotWidth, plotHeight, fontSize, edgeWidth, colorDict, cltSizeDict, maxClusterSize, wposDictSpec, labels, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,layout,plotFormat)
## draw chord plot
readmeStr = DrawChord(readmeStr, 'edge-count', 'int', adjCountM, colors, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat)
readmeStr = DrawChord(readmeStr, 'total-expression', 'float', adjM, colors, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat)
readmeStr = DrawChord(readmeStr, 'total-specificity', 'float', adjSpecM, colors, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat)
## draw heatmap
readmeStr = DrawHeatmap(readmeStr, 'edge-count', 'int', adjCountM, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat)
readmeStr = DrawHeatmap(readmeStr, 'total-expression', 'float', adjM, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat)
readmeStr = DrawHeatmap(readmeStr, 'total-specificity', 'float', adjSpecM, dataType, resultDir, plotWidth, plotHeight, fontSize, specificityThreshold,weightThreshold,frequencyThreshold,interDB,weightType,plotFormat)
## draw top LR pairs
readmeStr = DrawTopLRCltPairs(readmeStr, edgeDF, plotFormat,plotWidth, plotHeight, fontSize, resultDir, interDB, weightType, specificityThreshold, weightThreshold, frequencyThreshold, keepTopEdge, dataType)
if dataType == '':
edgeDFFileName = os.path.join(resultDir, 'Edges.csv')
readmeStr += 'Edges.csv: all filtered edges.\n'
adjMFileName = os.path.join(resultDir, 'Mtx.xlsx')
readmeStr += 'Mtx.xlsx: three adjacency matrices of the networks.\n'
else:
edgeDFFileName = os.path.join(resultDir, '%s_Edges.csv' % dataType)
readmeStr += 'xxx_Edges.csv: all filtered dynamic edges.\n'
adjMFileName = os.path.join(resultDir, '%s_Mtx.xlsx' % dataType)
readmeStr += 'xxx_Mtx.xlsx: three adjacency matrices of the dynamic networks.\n'
if weightType == 'mean':
columns=['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Ligand detection rate', 'Ligand average expression value',
'Ligand derived specificity of average expression value', 'Receptor detection rate', 'Receptor average expression value',
'Receptor derived specificity of average expression value', 'Edge average expression weight', 'Edge average expression derived specificity']
else:
columns=['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Ligand detection rate', 'Ligand total expression value',
'Ligand derived specificity of total expression value', 'Receptor detection rate', 'Receptor total expression value',
'Receptor derived specificity of total expression value', 'Edge total expression weight', 'Edge total expression derived specificity']
edgeDF = edgeDF.loc[:,edgeDF.columns[:-2]]
edgeDF.columns = columns
edgeDF.to_csv(edgeDFFileName, columns=columns, index=False)
writer = pd.ExcelWriter(adjMFileName, engine='xlsxwriter')
readmeDict = {'colA':['README'],'colB':['']}
readmeDict['colA'].append('Matrix 1')
readmeDict['colB'].append('Edge-count-based adjacency matrix')
readmeDict['colA'].append('Matrix 2')
readmeDict['colB'].append('Total-specificity-based adjacency matrix')
readmeDict['colA'].append('Matrix 3')
readmeDict['colB'].append('Total-expression-based adjacency matrix')
readmeDF = pd.DataFrame(readmeDict)
readmeDF.to_excel(writer, sheet_name='README', index=False, header=False)
adjCountM.to_excel(writer, sheet_name='Matrix 1')
adjSpecM.to_excel(writer, sheet_name='Matrix 2')
adjM.to_excel(writer, sheet_name='Matrix 3')
writer.save()
# save edge list
if keepTopEdge == 0:
edgeDict = {'sending cluster name':[], 'target cluster name':[], 'weight':[], 'count':[], 'specificity':[]}
for send in adjM.index:
for target in adjM.columns:
if adjM.loc[send, target] > 0:
edgeDict['sending cluster name'].append(send.replace('\n',' '))
edgeDict['target cluster name'].append(target.replace('\n',' '))
edgeDict['weight'].append(adjM.loc[send, target])
edgeDict['count'].append(adjCountM.loc[send, target])
edgeDict['specificity'].append(adjSpecM.loc[send, target])
if dataType == '':
edgeFileName = os.path.join(resultDir, 'network_edges.csv')
readmeStr += 'network_edges.csv: five weights of each cluster-to-cluster edge in the network.\n'
else:
edgeFileName = os.path.join(resultDir, '%s_network_edges.csv' % (dataType))
readmeStr += '%s_network_edges.csv: five weights of each cluster-to-cluster edge in the %s network.\n' % (dataType,dataType)
edgeDF = pd.DataFrame(edgeDict)
edgeDF['weight per pair'] = edgeDF['weight']/edgeDF['count']
edgeDF['specificity per pair'] = edgeDF['specificity']/edgeDF['count']
edgeDF = edgeDF.loc[:,['sending cluster name', 'target cluster name', 'weight', 'specificity', 'count', 'weight per pair', 'specificity per pair']]
newCol = ['Sending cluster name', 'Target cluster name', 'Total expression', 'Total specificity', 'Edge count', 'Average expression', 'Average specificity']
edgeDF.columns = newCol
edgeDF.sort_values(by="Total expression", ascending=False).to_csv(edgeFileName,columns=newCol,index=False)
with open(os.path.join(resultDir,'README.txt'), 'w') as file_object:
file_object.write('README\n')
file_object.write('\n')
file_object.write('The cell-to-cell signaling database: %s\n' % interDB)
file_object.write('The weight type of cell-to-cell signaling: %s\n' % weightType)
if keepTopEdge != 0:
file_object.write('Top edges to draw: %s\n' % keepTopEdge)
else:
file_object.write('Top edges to draw: all\n')
file_object.write('\n')
file_object.write('Expression threshold: %s\n' % weightThreshold)
file_object.write('Specificity threshold: %s\n' % specificityThreshold)
file_object.write('Detection threshold: %s\n' % frequencyThreshold)
file_object.write('\n')
file_object.write(readmeStr)
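# Illustrative sketch (toy data, not the real pipeline objects) of the edge-list export
# above: a cluster-to-cluster adjacency matrix is flattened into one row per non-zero
# edge, and per-pair averages are derived as total weight divided by edge count.
def _edge_list_sketch():
    import pandas as pd
    adj_w = pd.DataFrame([[0.0, 12.0], [8.0, 0.0]], index=['T cell', 'B cell'], columns=['T cell', 'B cell'])
    adj_c = pd.DataFrame([[0, 4], [2, 0]], index=adj_w.index, columns=adj_w.columns)
    rows = []
    for send in adj_w.index:
        for target in adj_w.columns:
            if adj_w.loc[send, target] > 0:
                rows.append({'Sending cluster name': send,
                             'Target cluster name': target,
                             'Total expression': adj_w.loc[send, target],
                             'Edge count': adj_c.loc[send, target]})
    edges = pd.DataFrame(rows)
    edges['Average expression'] = edges['Total expression'] / edges['Edge count']
    return edges.sort_values(by='Total expression', ascending=False)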
def FilterDeltaEdges(sourceFolder, interDB, weightType, frequencyThreshold):
olddapcols = ['sending cluster name', 'ligand', 'receptor', 'target cluster name', 'delta ligand frequency', 'delta ligand expression', 'delta ligand specificity', 'delta receptor frequency', 'delta receptor expression', 'delta receptor specificity', 'delta weight', 'delta specificity']
refinedOldcols = ['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Delta ligand detection rate', 'Delta ligand expression', 'Delta ligand specificity', 'Delta receptor detection rate', 'Delta receptor expression', 'Delta receptor specificity', 'Delta edge expression weight', 'Delta edge specificity weight']
uredgeDF = pd.read_csv(os.path.join(sourceFolder, 'Delta_edges_'+interDB, 'UP-regulated_%s.csv' % (weightType)), index_col=None, header=0)
realuredgeDF = uredgeDF.loc[(uredgeDF['Ligand detection rate in condition 1']>frequencyThreshold)&(uredgeDF['Receptor detection rate in condition 1']>frequencyThreshold)&(uredgeDF['Ligand detection rate in condition 2']>frequencyThreshold)&(uredgeDF['Receptor detection rate in condition 2']>frequencyThreshold),]
realuredgeDF['Delta ligand detection rate'] = realuredgeDF['Ligand detection rate in condition 2'] - realuredgeDF['Ligand detection rate in condition 1']
realuredgeDF['Delta receptor detection rate'] = realuredgeDF['Receptor detection rate in condition 2'] - realuredgeDF['Receptor detection rate in condition 1']
realuredgeDF = realuredgeDF.loc[:,['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Delta ligand detection rate', 'Delta ligand expression', 'Delta ligand specificity', 'Delta receptor detection rate', 'Delta receptor expression', 'Delta receptor specificity', 'Delta edge expression weight', 'Delta edge specificity weight']]
realuredgeDF.columns = olddapcols
realuredgeDF = realuredgeDF.reset_index()
realuredgeDF = realuredgeDF.loc[:,realuredgeDF.columns[1:]]
urapedgeDF = uredgeDF.loc[((uredgeDF['Ligand detection rate in condition 1']<=frequencyThreshold)|(uredgeDF['Receptor detection rate in condition 1']<=frequencyThreshold))&(uredgeDF['Ligand detection rate in condition 2']>frequencyThreshold)&(uredgeDF['Receptor detection rate in condition 2']>frequencyThreshold),]
urapedgeDF = urapedgeDF.loc[:,['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Ligand detection rate in condition 2', 'Ligand expression in condition 2', 'Ligand specificity in condition 2', 'Receptor detection rate in condition 2', 'Receptor expression in condition 2', 'Receptor specificity in condition 2', 'Edge expression weight in condition 2', 'Edge specificity weight in condition 2']]
urapedgeDF.columns = olddapcols
urdpedgeDF = uredgeDF.loc[(uredgeDF['Ligand detection rate in condition 1']>frequencyThreshold)&(uredgeDF['Receptor detection rate in condition 1']>frequencyThreshold)&((uredgeDF['Ligand detection rate in condition 2']<=frequencyThreshold)|(uredgeDF['Receptor detection rate in condition 2']<=frequencyThreshold)),]
urdpedgeDF = urdpedgeDF.loc[:,['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Ligand detection rate in condition 1', 'Ligand expression in condition 1', 'Ligand specificity in condition 1', 'Receptor detection rate in condition 1', 'Receptor expression in condition 1', 'Receptor specificity in condition 1', 'Edge expression weight in condition 1', 'Edge specificity weight in condition 1']]
urdpedgeDF.columns = olddapcols
dredgeDF = pd.read_csv(os.path.join(sourceFolder, 'Delta_edges_'+interDB, 'DOWN-regulated_%s.csv' % (weightType)), index_col=None, header=0)
realdredgeDF = dredgeDF.loc[(dredgeDF['Ligand detection rate in condition 1']>frequencyThreshold)&(dredgeDF['Receptor detection rate in condition 1']>frequencyThreshold)&(dredgeDF['Ligand detection rate in condition 2']>frequencyThreshold)&(dredgeDF['Receptor detection rate in condition 2']>frequencyThreshold),]
realdredgeDF['Delta ligand detection rate'] = realdredgeDF['Ligand detection rate in condition 1'] - realdredgeDF['Ligand detection rate in condition 2']
realdredgeDF['Delta receptor detection rate'] = realdredgeDF['Receptor detection rate in condition 1'] - realdredgeDF['Receptor detection rate in condition 2']
realdredgeDF = realdredgeDF.loc[:,['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Delta ligand detection rate', 'Delta ligand expression', 'Delta ligand specificity', 'Delta receptor detection rate', 'Delta receptor expression', 'Delta receptor specificity', 'Delta edge expression weight', 'Delta edge specificity weight']]
realdredgeDF.columns = olddapcols
realdredgeDF = realdredgeDF.reset_index()
realdredgeDF = realdredgeDF.loc[:,realdredgeDF.columns[1:]]
drapedgeDF = dredgeDF.loc[((dredgeDF['Ligand detection rate in condition 1']<=frequencyThreshold)|(dredgeDF['Receptor detection rate in condition 1']<=frequencyThreshold))&(dredgeDF['Ligand detection rate in condition 2']>frequencyThreshold)&(dredgeDF['Receptor detection rate in condition 2']>frequencyThreshold),]
drapedgeDF = drapedgeDF.loc[:,['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Ligand detection rate in condition 2', 'Ligand expression in condition 2', 'Ligand specificity in condition 2', 'Receptor detection rate in condition 2', 'Receptor expression in condition 2', 'Receptor specificity in condition 2', 'Edge expression weight in condition 2', 'Edge specificity weight in condition 2']]
drapedgeDF.columns = olddapcols
drdpedgeDF = dredgeDF.loc[(dredgeDF['Ligand detection rate in condition 1']>frequencyThreshold)&(dredgeDF['Receptor detection rate in condition 1']>frequencyThreshold)&((dredgeDF['Ligand detection rate in condition 2']<=frequencyThreshold)|(dredgeDF['Receptor detection rate in condition 2']<=frequencyThreshold)),]
drdpedgeDF = drdpedgeDF.loc[:,['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Ligand detection rate in condition 1', 'Ligand expression in condition 1', 'Ligand specificity in condition 1', 'Receptor detection rate in condition 1', 'Receptor expression in condition 1', 'Receptor specificity in condition 1', 'Edge expression weight in condition 1', 'Edge specificity weight in condition 1']]
drdpedgeDF.columns = olddapcols
apedgeDF = pd.read_csv(os.path.join(sourceFolder, 'Delta_edges_'+interDB, 'Appeared_%s.csv' % (weightType)), index_col=None, header=0)
realapedgeDF = apedgeDF.loc[(apedgeDF['Delta ligand detection rate']>frequencyThreshold)&(apedgeDF['Delta receptor detection rate']>frequencyThreshold),refinedOldcols]
realapedgeDF.columns = olddapcols
dpedgeDF = pd.read_csv(os.path.join(sourceFolder, 'Delta_edges_'+interDB, 'Disappeared_%s.csv' % (weightType)), index_col=None, header=0)
realdpedgeDF = dpedgeDF.loc[(dpedgeDF['Delta ligand detection rate']>frequencyThreshold)&(dpedgeDF['Delta receptor detection rate']>frequencyThreshold),refinedOldcols]
realdpedgeDF.columns = olddapcols
stedgeDF = pd.read_csv(os.path.join(sourceFolder, 'Delta_edges_'+interDB, 'Stable_%s.csv' % (weightType)), index_col=None, header=0)
stapedgeDF = stedgeDF.loc[((stedgeDF['Ligand detection rate in condition 1']<=frequencyThreshold)|(stedgeDF['Receptor detection rate in condition 1']<=frequencyThreshold))&(stedgeDF['Ligand detection rate in condition 2']>frequencyThreshold)&(stedgeDF['Receptor detection rate in condition 2']>frequencyThreshold),]
stapedgeDF = stapedgeDF.loc[:,['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Ligand detection rate in condition 2', 'Ligand expression in condition 2', 'Ligand specificity in condition 2', 'Receptor detection rate in condition 2', 'Receptor expression in condition 2', 'Receptor specificity in condition 2', 'Edge expression weight in condition 2', 'Edge specificity weight in condition 2']]
stapedgeDF.columns = olddapcols
stdpedgeDF = stedgeDF.loc[((stedgeDF['Ligand detection rate in condition 2']<=frequencyThreshold)|(stedgeDF['Receptor detection rate in condition 2']<=frequencyThreshold))&(stedgeDF['Ligand detection rate in condition 1']>frequencyThreshold)&(stedgeDF['Receptor detection rate in condition 1']>frequencyThreshold),]
stdpedgeDF = stdpedgeDF.loc[:,['Sending cluster', 'Ligand symbol', 'Receptor symbol', 'Target cluster', 'Ligand detection rate in condition 1', 'Ligand expression in condition 1', 'Ligand specificity in condition 1', 'Receptor detection rate in condition 1', 'Receptor expression in condition 1', 'Receptor specificity in condition 1', 'Edge expression weight in condition 1', 'Edge specificity weight in condition 1']]
stdpedgeDF.columns = olddapcols
realapedgeDF = pd.concat([realapedgeDF, urapedgeDF, drapedgeDF, stapedgeDF]).reset_index()
realapedgeDF = realapedgeDF.loc[:,realapedgeDF.columns[1:]]
realdpedgeDF = pd.concat([realdpedgeDF, urdpedgeDF, drdpedgeDF, stdpedgeDF]).reset_index()
realdpedgeDF = realdpedgeDF.loc[:,realdpedgeDF.columns[1:]]
alledgeDF = pd.read_csv(os.path.join(sourceFolder, 'Delta_edges_'+interDB, 'All_edges_%s.csv' % (weightType)), index_col=None, header=0)
kindDict = {'all':alledgeDF,'appeared':realapedgeDF, 'disappeared':realdpedgeDF, 'up_regulated':realuredgeDF, 'down_regulated':realdredgeDF}
return kindDict
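# Illustrative sketch of the detection-rate gating in FilterDeltaEdges: edges detected
# above the threshold in both conditions stay in the up/down-regulated sets, edges only
# detected in condition 2 are treated as "appeared", and edges only detected in
# condition 1 as "disappeared". Column names are shortened stand-ins for the real ones.
def _detection_gate_sketch(threshold=0.1):
    import pandas as pd
    edges = pd.DataFrame({
        'ligand_rate_c1': [0.5, 0.05, 0.4],
        'receptor_rate_c1': [0.6, 0.2, 0.5],
        'ligand_rate_c2': [0.7, 0.3, 0.05],
        'receptor_rate_c2': [0.8, 0.4, 0.3],
    })
    in_c1 = (edges['ligand_rate_c1'] > threshold) & (edges['receptor_rate_c1'] > threshold)
    in_c2 = (edges['ligand_rate_c2'] > threshold) & (edges['receptor_rate_c2'] > threshold)
    return {
        'regulated': edges[in_c1 & in_c2],
        'appeared': edges[~in_c1 & in_c2],
        'disappeared': edges[in_c1 & ~in_c2],
    }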
def MainNetwork(sourceFolder, interDB, weightType, specificityThreshold, weightThreshold, frequencyThreshold, keepTopEdge, layout, plotFormat,plotWidth, plotHeight, fontSize, edgeWidth, maxClusterSize, clusterDistance):
#customize cmap
ccolorList = ['#FF6DB6','#490092','#24FF24','#FFB677','#006DDB','#FFFF6D','#000000','#B66DFF','#920000','#004949','#6DB6FF','#924900','#009292','#B6DBFF','#DBD100']
# load data
clusterMapFilename = os.path.join(sourceFolder, 'ClusterMapping.csv')
if os.path.exists(clusterMapFilename):
# process node properties for single dataset
clusterMapDF = | pd.read_csv(clusterMapFilename, index_col=None, header=0) | pandas.read_csv |
from bapiw.api import API
from datetime import datetime, date
import pandas as pd
import numpy as np
bapiw = API()
class DataParser:
# intervals used when calling kline data
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#enum-definitions
INTERVAL_1MIN = '1m'
INTERVAL_3MIN = '3m'
INTERVAL_5MIN = '5m'
INTERVAL_15MIN = '15m'
INTERVAL_30MIN = '30m'
INTERVAL_1HR = '1h'
INTERVAL_2HR = '2h'
INTERVAL_4HR = '4h'
INTERVAL_6HR = '6h'
INTERVAL_8HR = '8h'
INTERVAL_12HR = '12h'
INTERVAL_1DAY = '1d'
INTERVAL_3DAY = '3d'
INTERVAL_1WEEK = '1w'
INTERVAL_1MONTH = '1M'
def getSymbols(self, onlyTrading=True, includes=''):
# pulls all exchange info
exchange = bapiw.get_exchangeInfo()
# by default onlyTrading is True, which returns only symbols
# whose status is TRADING on Binance; otherwise all symbols
# are returned.
# look for symbols in the exchange data, check the status for TRADING and
# add those symbols to a list
symbol_list = []
if onlyTrading:
for zd in exchange['symbols']:
if zd['status'] == 'TRADING':
symbol_list.append(zd['symbol'])
else:
for zd in exchange['symbols']:
symbol_list.append(zd['symbol'])
# create a dataframe with the symbols and rename the column from 0 to symbols
symbols = pd.DataFrame(symbol_list)
symbols = symbols.rename(columns={0: 'symbols'})
# if includes isn't null it will only list symbols that include that string
if includes:
# searches for the symbols that contain string 'includes' and puts them in mysymbols var
mysymbols = symbols[symbols['symbols'].str.contains(includes)]
# put those symbols in a DataFrame, reset the index
# and drop the old, now-inaccurate index column
mysymbols = pd.DataFrame(mysymbols['symbols'])
mysymbols = mysymbols.reset_index()
mysymbols = mysymbols.drop(columns=['index'])
symbols = mysymbols
return symbols
def getKlines(self, symbol, interval, startTime='', endTime='', limit=500, data='ohlcv'):
# pull data from api
kdata = bapiw.get_klines(symbol=symbol, interval=interval, startTime=startTime, endTime=endTime, limit=limit)
# put data into dataframe and remove columns that aren't needed
df = pd.DataFrame.from_dict(kdata)
df = df.drop(range(6, 7), axis=1)
df = df.drop(range(9, 12), axis=1)
df_date = df[0]
final_date = []
# convert the date
for time in df_date.unique():
readable = datetime.fromtimestamp(int(time / 1000))
final_date.append(readable)
# remove the old date from the dataframe and add the new date as index
df.pop(0)
dateframe_final_date = pd.DataFrame({'Date': final_date})
df = df.join(dateframe_final_date)
df.set_index('Date', inplace=True)
# rename the columns
df = df.rename(columns={1: 'Open', 2: 'High', 3: 'Low', 4: 'Close', 5: 'Volume', 7: 'Quote Volume', 8: '# Trades'})
# convert the values into numeric so we can compute them easily
df['Open'] = pd.to_numeric(df['Open'])
df['High'] = pd.to_numeric(df['High'])
df['Low'] = pd.to_numeric(df['Low'])
df['Close'] = pd.to_numeric(df['Close'])
df['Volume'] = pd.to_numeric(df['Volume'])
df['Quote Volume'] = pd.to_numeric(df['Quote Volume'])
df['# Trades'] = pd.to_numeric(df['# Trades'])
# look in 'data' for what values to return
if data.find('o') == -1:
df = df.drop(columns=['Open'])
if data.find('h') == -1:
df = df.drop(columns=['High'])
if data.find('l') == -1:
df = df.drop(columns=['Low'])
if data.find('c') == -1:
df = df.drop(columns=['Close'])
if data.find('v') == -1:
df = df.drop(columns=['Volume'])
if data.find('q') == -1:
df = df.drop(columns=['Quote Volume'])
if data.find('t') == -1:
df = df.drop(columns=['# Trades'])
return df
def getOrderBookBids(self, symbol, limit=100):
data = bapiw.get_orderbook(symbol=symbol, limit=limit)
x = data['bids']
bidprice, bidquantity = zip(*x)
bdf = pd.DataFrame({'bidPrice0': bidprice, 'bidQuantity0': bidquantity})
bdf['bidPrice0'] = pd.to_numeric(bdf['bidPrice0'])
bdf['bidQuantity0'] = pd.to_numeric(bdf['bidQuantity0'])
bdf['date0'] = datetime.today().strftime('%Y-%m-%d %X')
return bdf
def getOrderBookAsks(self, symbol, limit=100):
data = bapiw.get_orderbook(symbol=symbol, limit=limit)
y = data['asks']
askprice, askquantity = zip(*y)
adf = | pd.DataFrame({'askPrice0': askprice, 'askQuantity0': askquantity}) | pandas.DataFrame |
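# Hypothetical usage sketch for the DataParser class above. It needs live Binance API
# access through bapiw, so it is a sketch rather than a test; the symbol 'BTCUSDT' and
# the argument values are assumptions, only the method signatures come from the class.
def _dataparser_usage_sketch():
    parser = DataParser()
    usdt_symbols = parser.getSymbols(onlyTrading=True, includes='USDT')
    # 'data' selects columns: o=Open, h=High, l=Low, c=Close, v=Volume,
    # q=Quote Volume, t=# Trades; 'cv' keeps only Close and Volume.
    candles = parser.getKlines('BTCUSDT', DataParser.INTERVAL_1HR, limit=100, data='cv')
    bids = parser.getOrderBookBids('BTCUSDT', limit=50)
    return usdt_symbols, candles, bids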
#!/opt/conda/envs/feature-detection/bin/python
# main.py
# 1. load point cloud in modelnet40 normal format
# 2. calculate ISS keypoints
# 3. calculate FPFH or SHOT for detected keypoints
# 4. visualize the results
import os
import sys
import copy
ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT)
import numpy as np
np.set_printoptions(suppress=True)
import open3d as o3d
# my envs
# env:environment/mmlab_point3d_open3d/bin/python open3d==0.9.0.0 numpy==1.19.5
# env:environment/miniconda3_py17/bin/python open3d==0.13.0 numpy==1.21.0
import argparse
# IO utils:
from myio import read_modelnet40_normal
# detector:
from iss import detect
# descriptor:
from fpfh import describe
import numpy as np
import pandas as pd
import open3d as o3d
import seaborn as sns
import matplotlib.pyplot as plt
def get_arguments():
"""
Get command-line arguments
"""
# init parser:
parser = argparse.ArgumentParser("Detect ISS keypoints on ModelNet40 dataset.")
# add required and optional groups:
required = parser.add_argument_group('Required')
optional = parser.add_argument_group('Optional')
# -i /workspace/data/modelnet40_normal_resampled/chair/chair_0001.txt -r 0.05
# add required:
required.add_argument(
"-i", dest="input", help="Input path of ModelNet40 sample.",
default="/home/ninghua/windows10/media_2.0T/dataset/ModelNet/modelnet40_normal_resampled/chair/chair_0001.txt",
required=False
)
required.add_argument(
"-r", dest="radius", help="Radius for radius nearest neighbor definition.",
required=False, type=float, default=0.05
)
# parse arguments:
return parser.parse_args()
if __name__ == '__main__':
# parse arguments:
arguments = get_arguments()
# load point cloud:
point_cloud = read_modelnet40_normal(arguments.input)
# compute surface normals:
# point_cloud.estimate_normals(
# search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30)
# )
# build search tree:
search_tree = o3d.geometry.KDTreeFlann(point_cloud)
# detect keypoints:
keypoints = detect(point_cloud, search_tree, arguments.radius)
# visualize:
# paint background as grey:
point_cloud.paint_uniform_color([0.50, 0.50, 0.50])
# show roi:
max_bound = point_cloud.get_max_bound()
min_bound = point_cloud.get_min_bound()
center = (min_bound + max_bound) / 2.0
min_bound[1] = max_bound[1] - 0.1
max_bound[1] = max_bound[1]
min_bound[2] = center[2]
max_bound[2] = max_bound[2]
bounding_box = o3d.geometry.AxisAlignedBoundingBox(
min_bound = min_bound,
max_bound = max_bound
)
roi = point_cloud.crop(bounding_box)
roi.paint_uniform_color([1.00, 0.00, 0.00])
# paint keypoints as red:
keypoints_in_roi = keypoints.loc[
(
((keypoints['x'] >= min_bound[0]) & (keypoints['x'] <= max_bound[0])) &
((keypoints['y'] >= min_bound[1]) & (keypoints['y'] <= max_bound[1])) &
((keypoints['z'] >= min_bound[2]) & (keypoints['z'] <= max_bound[2]))
),
:
]
np.asarray(point_cloud.colors)[keypoints_in_roi['id'].values, :] = [1.0, 0.0, 0.0]
o3d.visualization.draw_geometries([point_cloud])
# describe keypoints:
df_signature_visualization = []
for keypoint_id in keypoints_in_roi['id'].values:
signature = describe(point_cloud, search_tree, keypoint_id, arguments.radius, 6)
df_ = pd.DataFrame.from_dict(
{
'index': np.arange(len(signature)),
'feature': signature
}
)
df_['keypoint_id'] = keypoint_id # f'{keypoint_id:06d}'
df_signature_visualization.append(df_)
# https://discuss.analyticsvidhya.com/t/python-error-cannot-reindex-from-a-duplicate-axis/6333/2
#df_signature_visualization = pd.concat(df_signature_visualization[:6], ignore_index=True)
df_signature_visualization = | pd.concat(df_signature_visualization, ignore_index=True) | pandas.concat |
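# Illustrative sketch of the region-of-interest filter above: keep only keypoints whose
# coordinates fall inside an axis-aligned bounding box. The table layout (columns 'id',
# 'x', 'y', 'z') mirrors what the detect() helper appears to return; values are invented.
def _roi_filter_sketch():
    import numpy as np
    import pandas as pd
    keypoints = pd.DataFrame({
        'id': [0, 1, 2, 3],
        'x': [0.1, 0.4, 0.9, 0.2],
        'y': [0.2, 0.5, 0.1, 0.8],
        'z': [0.3, 0.6, 0.2, 0.9],
    })
    min_bound = np.array([0.0, 0.0, 0.0])
    max_bound = np.array([0.5, 0.6, 0.7])
    inside = (
        keypoints['x'].between(min_bound[0], max_bound[0])
        & keypoints['y'].between(min_bound[1], max_bound[1])
        & keypoints['z'].between(min_bound[2], max_bound[2])
    )
    return keypoints.loc[inside]  # rows with id 0 and 1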
import torch
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tqdm
from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, accuracy_score
#####################################################################################
experiment_name = "Jun1"
mapping_file_location = "./data/mappings/mapping_all.csv"
scores_location = "./results/EVE_scores/BPU/all_EVE_scores_May23.csv"
labels_location = "./data/labels/All_3k_proteins_ClinVar_labels_May21.csv"
create_concatenation_baseline_scores = False
raw_baseline_scores_location = '/n/groups/marks/projects/marks_lab_and_oatml/DRP_part_2/dbNSFP_single_trascript_files'
concatenated_baseline_scores_location = './data/baseline_scores/all_baseline_scores_'+experiment_name+'.csv'
merged_file_eve_clinvar_baseline_location = './results/EVE_scores/BPU/all_EVE_Clinvar_baselines_BP_'+experiment_name+'.csv'
AUC_accuracy_all_location = './results/AUC_Accuracy/AUC_accuracy_all_'+experiment_name+'.csv'
AUC_accuracy_75pct_location = './results/AUC_Accuracy/AUC_accuracy_75pct_'+experiment_name+'.csv'
AUC_accuracy_all_position_level_location = './results/AUC_Accuracy/AUC_accuracy_all_position_level_'+experiment_name+'.csv'
AUC_accuracy_75pct_position_level_location = './results/AUC_Accuracy/AUC_accuracy_75pct_position_level_'+experiment_name+'.csv'
#####################################################################################
mapping_file = pd.read_csv(mapping_file_location,low_memory=False)
list_proteins = list(mapping_file.protein_name)
num_proteins_to_score = len(mapping_file.protein_name)
print("Number of proteins to score: "+str(num_proteins_to_score))
#####################################################################################
## Create concatenated file with all baseline scores
#####################################################################################
mapping_pid_filename = pd.read_csv('./data/mappings/mapping_pid_baseline-filename.csv',low_memory=False)
mapping_baseline_score = pd.read_csv('./data/mappings/mapping_baseline_score_cleanup.csv',low_memory=False)
variables_to_keep=[
'pid',
'mutant',
'clinvar_clnsig',
'BayesDel_addAF_score',
'BayesDel_noAF_score',
'CADD_phred',
'CADD_phred_hg19',
'CADD_raw',
'CADD_raw_hg19',
'ClinPred_score',
'DANN_score',
'DEOGEN2_score',
'Eigen-PC-phred_coding',
'Eigen-PC-raw_coding',
'Eigen-phred_coding',
'Eigen-raw_coding',
'FATHMM_score',
'fathmm-MKL_coding_score',
'fathmm-XF_coding_score',
'GenoCanyon_score',
'LIST-S2_score',
'LRT_score',
'M-CAP_score',
'MetaLR_score',
'MetaSVM_score',
'MPC_score',
'MutationAssessor_score',
'MutationTaster_score',
'MutPred_score',
'MVP_score',
'Polyphen2_HDIV_score',
'Polyphen2_HVAR_score',
'PrimateAI_score',
'PROVEAN_score',
'REVEL_score',
'SIFT_score',
'SIFT4G_score',
'VEST4_score',
'BayesDel_addAF_pred',
'BayesDel_noAF_pred',
'ClinPred_pred',
'DEOGEN2_pred',
'FATHMM_pred',
'fathmm-MKL_coding_pred',
'fathmm-XF_coding_pred',
'LIST-S2_pred',
'LRT_pred',
'M-CAP_pred',
'MetaLR_pred',
'MetaSVM_pred',
'MutationAssessor_pred',
'MutationTaster_pred',
'PrimateAI_pred',
'PROVEAN_pred',
'SIFT_pred',
'SIFT4G_pred'
#'Aloft_pred'
]
scoring_variables=[
'BayesDel_addAF_score',
'BayesDel_noAF_score',
'CADD_phred',
'CADD_phred_hg19',
'CADD_raw',
'CADD_raw_hg19',
'ClinPred_score',
'DANN_score',
'DEOGEN2_score',
'Eigen-PC-phred_coding',
'Eigen-PC-raw_coding',
'Eigen-phred_coding',
'Eigen-raw_coding',
'FATHMM_score',
'fathmm-MKL_coding_score',
'fathmm-XF_coding_score',
'GenoCanyon_score',
'LIST-S2_score',
'LRT_score',
'M-CAP_score',
'MetaLR_score',
'MetaSVM_score',
'MPC_score',
'MutationAssessor_score',
'MutationTaster_score',
'MutPred_score',
'MVP_score',
'Polyphen2_HDIV_score',
'Polyphen2_HVAR_score',
'PrimateAI_score',
'PROVEAN_score',
'REVEL_score',
'SIFT_score',
'SIFT4G_score',
'VEST4_score'
]
pred_variables_mapping_DT=[
'BayesDel_addAF_pred',
'BayesDel_noAF_pred',
'ClinPred_pred',
'DEOGEN2_pred',
'FATHMM_pred',
'LIST-S2_pred',
'M-CAP_pred',
'MetaLR_pred',
'MetaSVM_pred',
'PrimateAI_pred',
'SIFT_pred',
'SIFT4G_pred'
]
pred_variables_mapping_DN=[
'fathmm-MKL_coding_pred',
'fathmm-XF_coding_pred',
'LRT_pred',
'PROVEAN_pred'
]
pred_variables_to_threshold=[
'MVP_score',
'Polyphen2_HDIV_score',
'Polyphen2_HVAR_score'
]
if create_concatenation_baseline_scores:
list_processed_scoring_files=[]
for protein_name in tqdm.tqdm(list_proteins):
try:
baseline_filename = mapping_pid_filename['filename'][mapping_pid_filename['pid']==protein_name].iloc[0]
scoring_file = pd.read_csv(raw_baseline_scores_location+os.sep+baseline_filename, low_memory=False)
scoring_file['pid']=[protein_name]*len(scoring_file)
scoring_file['mutant']=scoring_file['aaref']+scoring_file['aapos'].astype(str)+scoring_file['aaalt']
scoring_file=scoring_file[variables_to_keep]
for score_var in scoring_variables:
scoring_file[score_var]=pd.to_numeric(scoring_file[score_var], errors="coerce") * int(mapping_baseline_score['directionality'][mapping_baseline_score['prediction_name']==score_var].iloc[0])
for pred_var in pred_variables_mapping_DT:
scoring_file[pred_var]=scoring_file[pred_var].map({"D":"Pathogenic", "T":"Benign"})
for pred_var in pred_variables_mapping_DN:
scoring_file[pred_var]=scoring_file[pred_var].map({"D":"Pathogenic", "N":"Benign"})
#scoring_file['Aloft_pred']=scoring_file['Aloft_pred'].map({"R":"Pathogenic", "D":"Pathogenic","T":"Benign"})
scoring_file['MutationAssessor_pred']=scoring_file['MutationAssessor_pred'].map({"H":"Pathogenic","M":"Pathogenic", "L":"Benign", "N":"Benign"})
scoring_file['Polyphen2_HDIV_pred']=(scoring_file['Polyphen2_HDIV_score']>0.5).map({True:"Pathogenic", False:"Benign"})
scoring_file['Polyphen2_HVAR_pred']=(scoring_file['Polyphen2_HVAR_score']>0.5).map({True:"Pathogenic", False:"Benign"})
scoring_file['MutationTaster_pred']=(scoring_file['MutationTaster_score']<0.5).map({True:"Pathogenic", False:"Benign"})
scoring_file['MVP_pred']=(scoring_file['MVP_score']>0.7).map({True:"Pathogenic", False:"Benign"})
list_processed_scoring_files.append(scoring_file)
except:
print("Problem processing baseline scores for: "+str(protein_name))
#try:
# all_baseline_scores = pd.concat([all_baseline_scores,scoring_file], axis=0)
#except:
# all_baseline_scores = scoring_file
all_baseline_scores = pd.concat(list_processed_scoring_files, axis=0)
all_baseline_scores.to_csv(concatenated_baseline_scores_location,index=False)
classification_variables=[
'BayesDel_addAF_pred',
'BayesDel_noAF_pred',
'ClinPred_pred',
'DEOGEN2_pred',
'FATHMM_pred',
'LIST-S2_pred',
'M-CAP_pred',
'MetaLR_pred',
'MetaSVM_pred',
'PrimateAI_pred',
'SIFT_pred',
'SIFT4G_pred',
'fathmm-MKL_coding_pred',
'fathmm-XF_coding_pred',
'LRT_pred',
'PROVEAN_pred',
#'Aloft_pred',
'MutationAssessor_pred',
'Polyphen2_HDIV_pred',
'Polyphen2_HVAR_pred',
'MVP_pred'
]
#####################################################################################
#Merge EVE scores and baseline scores
if not os.path.exists(merged_file_eve_clinvar_baseline_location):
data = pd.read_csv(scores_location) #protein_name,mutations,evol_indices,EVE_scores,EVE_classes_100_pct_retained,uncertainty,uncertainty_deciles,uncertainty_quartiles,EVE_classes_10_pct_retained,EVE_classes_20_pct_retained,EVE_classes_30_pct_retained,EVE_classes_40_pct_retained,EVE_classes_50_pct_retained,EVE_classes_60_pct_retained,EVE_classes_70_pct_retained,EVE_classes_80_pct_retained,EVE_classes_90_pct_retained,GMM_class_25_pct_retained,GMM_class_75_pct_retained
ClinVar_labels = | pd.read_csv(labels_location) | pandas.read_csv |
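# Illustrative sketch of the label harmonisation above: tool-specific prediction codes
# are mapped onto Pathogenic/Benign, and continuous scores are thresholded where a tool
# ships no categorical call. The 0.5 and 0.7 cut-offs are the ones used in the loop
# above; the example scores themselves are invented.
def _label_mapping_sketch():
    import pandas as pd
    preds = pd.DataFrame({
        'SIFT_pred': ['D', 'T', 'D'],
        'PROVEAN_pred': ['D', 'N', 'N'],
        'Polyphen2_HDIV_score': [0.9, 0.2, 0.6],
        'MVP_score': [0.8, 0.1, 0.65],
    })
    preds['SIFT_pred'] = preds['SIFT_pred'].map({'D': 'Pathogenic', 'T': 'Benign'})
    preds['PROVEAN_pred'] = preds['PROVEAN_pred'].map({'D': 'Pathogenic', 'N': 'Benign'})
    preds['Polyphen2_HDIV_pred'] = (preds['Polyphen2_HDIV_score'] > 0.5).map({True: 'Pathogenic', False: 'Benign'})
    preds['MVP_pred'] = (preds['MVP_score'] > 0.7).map({True: 'Pathogenic', False: 'Benign'})
    return preds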
#!pip install plotnine
import numpy as np
import pandas as pd
from plotnine import *
def plot_factor_spatial(adata, fact, cluster_names,
fact_ind=[0], trans="log",
sample_name=None, samples_col='sample',
obs_x='imagecol', obs_y='imagerow',
n_columns=6,
max_col=5000,
col_breaks=[0.1, 100, 1000, 3000],
figure_size=(24, 5.7),
point_size=0.8, text_size=9):
r"""Plot expression of factors / cell types in space.
Convenient but not as powerful as scanpy plotting.
:param adata: anndata object with spatial data
:param fact: pd.DataFrame with spatial expression of factors (W), e.g. mod.spot_factors_df
:param cluster_names: names of those factors to show on a plot
:param fact_ind: index of factors to plot
:param trans: transform colorscale? passed to plotnine.scale_color_cmap
:param sample_name: if anndata object contains multiple samples specify which sample to plot (no warning given if not)
:param samples_col: if the anndata object contains multiple samples, which .obs column specifies the sample?
:param obs_x: which .obs column specifies the x coordinate?
:param obs_y: which .obs column specifies the y coordinate?
:param n_columns: how many factors / clusters to plot in each row (plotnine.facet_grid)
:param max_col: colorscale maximum expression in fact
:param col_breaks: colorscale breaks
:param figure_size: figure size behaves oddly (only the x dimension has an effect; use 24 for a 6-column plot, 12 for 3 columns, 8 for 2, ...).
:param point_size: point size of spots
:param text_size: text size
"""
if sample_name is not None:
sample_ind = np.isin(adata.obs[samples_col], sample_name)
else:
sample_ind = np.repeat(True, adata.shape[0])
# adata.obsm['X_spatial'][:,0] vs adata.obs['imagecol'] & adata.obs['imagerow']
for_plot = np.concatenate((adata.obs[obs_x].values.reshape((adata.obs.shape[0], 1)),
-adata.obs[obs_y].values.reshape((adata.obs.shape[0], 1)),
fact.iloc[:, fact_ind[0]].values.reshape((adata.obs.shape[0], 1)),
np.array([cluster_names[fact_ind[0]] for j in range(adata.obs.shape[0])]).reshape(
(adata.obs.shape[0], 1))),
1)
for_plot = pd.DataFrame(for_plot, index=adata.obs.index,
columns=['imagecol', 'imagerow', 'weights', 'cluster'])
# select only correct sample
for_plot = for_plot.loc[sample_ind, :]
for i in fact_ind[1:]:
for_plot1 = np.concatenate((adata.obs[obs_x].values.reshape((adata.obs.shape[0], 1)),
-adata.obs[obs_y].values.reshape((adata.obs.shape[0], 1)),
fact.iloc[:, i].values.reshape((adata.obs.shape[0], 1)),
np.array([cluster_names[i] for j in range(adata.obs.shape[0])]).reshape(
(adata.obs.shape[0], 1))),
1)
for_plot1 = pd.DataFrame(for_plot1, index=adata.obs.index,
columns=['imagecol', 'imagerow', 'weights', 'cluster'])
# select only correct sample
for_plot1 = for_plot1.loc[sample_ind, :]
for_plot = pd.concat((for_plot, for_plot1))
for_plot['imagecol'] = | pd.to_numeric(for_plot['imagecol']) | pandas.to_numeric |
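# Illustrative sketch of the reshaping plot_factor_spatial performs: per-factor weights
# are stacked into a long table (x, y, weights, cluster) so plotnine can facet one panel
# per factor. pd.melt yields the same long format as the manual concatenation above; the
# coordinates and factor names here are invented.
def _long_format_sketch():
    import pandas as pd
    spots = pd.DataFrame({
        'imagecol': [10.0, 20.0, 30.0],
        'imagerow': [5.0, 15.0, 25.0],
        'Factor_1': [0.2, 0.7, 0.1],
        'Factor_2': [0.5, 0.1, 0.9],
    })
    long_df = spots.melt(
        id_vars=['imagecol', 'imagerow'],
        value_vars=['Factor_1', 'Factor_2'],
        var_name='cluster',
        value_name='weights',
    )
    return long_df  # 6 rows: one per spot x factor, ready for facet_wrap('~cluster')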
import numpy as np
import pandas as pd
import sys
init_path = sys.argv[1]
og_path = sys.argv[1].split('/')
file = og_path.pop()
pre_dir = og_path
pdy_data = np.load(init_path, allow_pickle=True)
# csv_data = pd.read_csv(init_path, header=None, index_col=False)
data = | pd.DataFrame(pdy_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pytest
from pandas.compat import PY3, StringIO, u
from pandas.errors import ParserError
from pandas import DataFrame
import pandas.util.testing as tm
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quoting='foo')
# quoting must be in the range [0, 3]
msg = 'bad "quoting" value'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quoting=5)
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
import pandas as pd
import numpy as np
import statsmodels as sm
import statsmodels.api as smapi
import math
from pyqstrat.pq_utils import monotonically_increasing, infer_frequency
from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot
import matplotlib as mpl
import matplotlib.figure as mpl_fig
from typing import Tuple, Sequence, Mapping, MutableMapping, Optional, Any, Callable, Dict
def compute_periods_per_year(timestamps: np.ndarray) -> float:
"""
Computes trading periods per year for an array of numpy datetime64's.
e.g. if most of the timestamps are separated by 1 day, will return 252.
Args:
timestamps: a numpy array of datetime64's
>>> compute_periods_per_year(np.array(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-09'], dtype='M8[D]'))
252.0
>>> round(compute_periods_per_year(np.array(['2018-01-01 10:00', '2018-01-01 10:05', '2018-01-01 10:10'], dtype='M8[m]')), 2)
72576.05
"""
if not len(timestamps): return np.nan
freq = infer_frequency(timestamps)
return 252. / freq if freq != 0 else np.nan
def compute_amean(returns: np.ndarray, periods_per_year: int) -> float:
'''
Computes arithmetic mean of a return array, ignoring NaNs
Args:
returns: Represents returns at any frequency
periods_per_year: Frequency of the returns, e.g. 252 for daily returns
>>> compute_amean(np.array([0.003, 0.004, np.nan]), 252)
0.882
'''
if not len(returns): return np.nan
return np.nanmean(returns) * periods_per_year
def compute_num_periods(timestamps: np.ndarray, periods_per_year: float) -> float:
'''
Given an array of timestamps, we compute how many periods there are between the first and last element, where the length
of a period is defined by periods_per_year. For example, if there are 6 periods per year,
then each period would be approx. 2 months long.
Args:
timestamps (np.ndarray of np.datetime64): a numpy array of returns, can contain nans
periods_per_year: number of periods between first and last return
>>> compute_num_periods(np.array(['2015-01-01', '2015-03-01', '2015-05-01'], dtype='M8[D]'), 6)
2.0
'''
if not len(timestamps): return np.nan
assert(monotonically_increasing(timestamps))
fraction_of_year = (timestamps[-1] - timestamps[0]) / (np.timedelta64(1, 's') * 365 * 24 * 60 * 60)
return round(fraction_of_year * periods_per_year)
def compute_gmean(timestamps: np.ndarray, returns: np.ndarray, periods_per_year: float) -> float:
"""
Compute geometric mean of an array of returns
Args:
returns: a numpy array of returns, can contain nans
periods_per_year: Used for annualizing returns
>>> round(compute_gmean(np.array(['2015-01-01', '2015-03-01', '2015-05-01'], dtype='M8[D]'), np.array([0.001, 0.002, 0.003]), 252.), 6)
0.018362
"""
if not len(returns): return np.nan
assert(len(returns) == len(timestamps))
assert(isinstance(timestamps, np.ndarray) and isinstance(returns, np.ndarray))
mask = np.isfinite(returns)
timestamps = timestamps[mask]
returns = returns[mask]
num_periods = compute_num_periods(timestamps, periods_per_year)
g_mean = ((1.0 + returns).prod())**(1.0 / num_periods)
g_mean = np.power(g_mean, periods_per_year) - 1.0
return g_mean
def compute_std(returns: np.ndarray) -> float:
""" Computes standard deviation of an array of returns, ignoring nans """
if not len(returns): return np.nan
return np.nanstd(returns)
def compute_sortino(returns: np.ndarray, amean: float, periods_per_year: float) -> float:
'''
Note that this assumes target return is 0.
Args:
returns: a numpy array of returns
amean: arithmetic mean of returns
periods_per_year: number of trading periods per year
>>> print(round(compute_sortino(np.array([0.001, -0.001, 0.002]), 0.001, 252), 6))
0.133631
'''
if not len(returns) or not np.isfinite(amean) or periods_per_year <= 0: return np.nan
returns = np.where((~np.isfinite(returns)), 0.0, returns)
normalized_rets = np.where(returns > 0.0, 0.0, returns)
sortino_denom = np.std(normalized_rets)
sortino = np.nan if sortino_denom == 0 else amean / (sortino_denom * np.sqrt(periods_per_year))
return sortino
def compute_sharpe(returns: np.ndarray, amean: float, periods_per_year: float) -> float:
'''
Note that this does not take risk-free returns into account, so it is really a Sharpe0, i.e. it assumes the risk-free rate is 0
Args:
returns: a numpy array of returns
amean: arithmetic mean of returns
periods_per_year: number of trading periods per year
>>> round(compute_sharpe(np.array([0.001, -0.001, 0.002]), 0.001, 252), 6)
0.050508
'''
if not len(returns) or not np.isfinite(amean) or periods_per_year <= 0: return np.nan
returns = np.where((~np.isfinite(returns)), 0.0, returns)
s = np.std(returns)
sharpe = np.nan if s == 0 else amean / (s * np.sqrt(periods_per_year))
return sharpe
def compute_k_ratio(equity: np.ndarray, periods_per_year: int, halflife_years: float = None) -> float:
'''
Compute k-ratio (2013 or original versions by <NAME>). See https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2230949
We also implement a modification that allows higher weighting for more recent returns.
Args:
equity: a numpy array of the equity in your account
periods_per_year: 252 for daily values
halflife_years: If set, we use weighted linear regression to give less weight to older returns.
In this case, we compute the original k-ratio which does not use periods per year or number of observations
If not set, we compute the 2013 version of the k-ratio which weights k-ratio by sqrt(periods_per_year) / nobs
Returns:
weighted or unweighted k-ratio
>>> np.random.seed(0)
>>> t = np.arange(1000)
>>> ret = np.random.normal(loc = 0.0025, scale = 0.01, size = len(t))
>>> equity = (1 + ret).cumprod()
>>> assert(math.isclose(compute_k_ratio(equity, 252, None), 3.888, abs_tol=0.001))
>>> assert(math.isclose(compute_k_ratio(equity, 252, 0.5), 602.140, abs_tol=0.001))
'''
equity = equity[np.isfinite(equity)]
equity = np.log(equity)
t = np.arange(len(equity))
if halflife_years:
halflife = halflife_years * periods_per_year
k = math.log(0.5) / halflife
w = np.exp(k * t)
w = w ** 2 # Statsmodels requires square of weights
w = w[::-1]
fit = sm.regression.linear_model.WLS(endog=equity, exog=t, weights=w, hasconst=False).fit()
k_ratio = fit.params[0] / fit.bse[0]
else:
fit = smapi.OLS(endog=equity, exog=np.arange(len(equity)), hasconst=False).fit()
k_ratio = fit.params[0] * math.sqrt(periods_per_year) / (fit.bse[0] * len(equity))
return k_ratio
def compute_equity(timestamps: np.ndarray, starting_equity: float, returns: np.ndarray) -> np.ndarray:
''' Given starting equity, timestamps and returns, create a numpy array of equity at each date'''
return starting_equity * np.cumprod(1. + returns)
def compute_rolling_dd(timestamps: np.ndarray, equity: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
'''
Compute numpy array of rolling drawdown percentage
Args:
timestamps: numpy array of datetime64
equity: numpy array of equity
'''
assert(len(timestamps) == len(equity))
if not len(timestamps): return np.array([], dtype='M8[ns]'), np.array([], dtype=np.float)
s = pd.Series(equity, index=timestamps)
rolling_max = s.expanding(min_periods=1).max()
dd = np.where(s >= rolling_max, 0.0, -(s - rolling_max) / rolling_max)
return timestamps, dd
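# Worked sketch of compute_rolling_dd (invented numbers): with equity 100 -> 110 -> 99
# the running maximum is 100, 110, 110, so the drawdown series is 0, 0, 0.1.
def _rolling_dd_sketch():
    import numpy as np
    timestamps = np.array(['2018-01-01', '2018-01-02', '2018-01-03'], dtype='M8[D]')
    equity = np.array([100.0, 110.0, 99.0])
    _, dd = compute_rolling_dd(timestamps, equity)
    return dd  # array([0. , 0. , 0.1])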
def compute_maxdd_pct(rolling_dd: np.ndarray) -> float:
'''Compute max drawdown percentage given a numpy array of rolling drawdowns, ignoring NaNs'''
if not len(rolling_dd): return np.nan
return np.nanmax(rolling_dd)
def compute_maxdd_date(rolling_dd_dates: np.ndarray, rolling_dd: np.ndarray) -> float:
''' Compute date of max drawdown given numpy array of timestamps, and corresponding rolling dd percentages'''
if not len(rolling_dd_dates): return pd.NaT
assert(len(rolling_dd_dates) == len(rolling_dd))
return rolling_dd_dates[np.argmax(rolling_dd)]
def compute_maxdd_start(rolling_dd_dates: np.ndarray, rolling_dd: np.ndarray, mdd_date: np.datetime64) -> np.datetime64:
'''Compute date when max drawdown starts, given numpy array of timestamps corresponding rolling dd
percentages and date of the max draw down'''
if not len(rolling_dd_dates) or pd.isnull(mdd_date): return pd.NaT
assert(len(rolling_dd_dates) == len(rolling_dd))
return rolling_dd_dates[(rolling_dd <= 0) & (rolling_dd_dates < mdd_date)][-1]
def compute_mar(returns: np.ndarray, periods_per_year: float, mdd_pct: float) -> float:
'''Compute MAR ratio, which is annualized return divided by biggest drawdown since inception.'''
if not len(returns) or np.isnan(mdd_pct) or mdd_pct == 0: return np.nan
return np.mean(returns) * periods_per_year / mdd_pct
def compute_dates_3yr(timestamps: np.ndarray) -> np.ndarray:
''' Given an array of numpy datetimes, return those that are within 3 years of the last date in the array'''
if not len(timestamps): return np.array([], dtype='M8[D]')
last_date = timestamps[-1]
d = pd.to_datetime(last_date)
start_3yr = np.datetime64(d.replace(year=d.year - 3))
return timestamps[timestamps > start_3yr]
def compute_returns_3yr(timestamps: np.ndarray, returns: np.ndarray) -> np.ndarray:
'''Given an array of numpy datetimes and an array of returns, return those that are within 3 years
of the last date in the datetime array '''
if not len(timestamps): return np.array([], dtype=np.float)
assert(len(timestamps) == len(returns))
timestamps_3yr = compute_dates_3yr(timestamps)
return returns[timestamps >= timestamps_3yr[0]]
def compute_rolling_dd_3yr(timestamps: np.ndarray, equity: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
'''Compute rolling drawdowns over the last 3 years'''
if not len(timestamps): return np.array([], dtype='M8[D]')
last_date = timestamps[-1]
d = pd.to_datetime(last_date)
start_3yr = np.datetime64(d.replace(year=d.year - 3))
equity = equity[timestamps >= start_3yr]
timestamps = timestamps[timestamps >= start_3yr]
return compute_rolling_dd(timestamps, equity)
def compute_maxdd_pct_3yr(rolling_dd_3yr: np.ndarray) -> float:
'''Compute max drawdown percentage over the last 3 years'''
return compute_maxdd_pct(rolling_dd_3yr)
def compute_maxdd_date_3yr(rolling_dd_3yr_timestamps: np.ndarray, rolling_dd_3yr: np.ndarray) -> np.datetime64:
'''Compute max drawdown date over the last 3 years'''
return compute_maxdd_date(rolling_dd_3yr_timestamps, rolling_dd_3yr)
def compute_maxdd_start_3yr(rolling_dd_3yr_timestamps: np.ndarray, rolling_dd_3yr: np.ndarray, mdd_date_3yr: np.datetime64) -> np.datetime64:
'''Compute max drawdown start date over the last 3 years'''
return compute_maxdd_start(rolling_dd_3yr_timestamps, rolling_dd_3yr, mdd_date_3yr)
def compute_calmar(returns_3yr: np.ndarray, periods_per_year: float, mdd_pct_3yr: float) -> float:
'''Compute Calmar ratio, which is the annualized return divided by max drawdown over the last 3 years'''
return compute_mar(returns_3yr, periods_per_year, mdd_pct_3yr)
def compute_bucketed_returns(timestamps: np.ndarray, returns: np.ndarray) -> Tuple[Sequence[int], Sequence[np.ndarray]]:
'''
Bucket returns by year
Returns:
A tuple with the first element being a list of years and the second a list of
numpy arrays containing returns for each corresponding year
'''
assert(len(timestamps) == len(returns))
if not len(timestamps): return np.array([], dtype=np.str), np.array([], dtype=np.float)
s = pd.Series(returns, index=timestamps)
years_list = []
rets_list = []
for year, rets in s.groupby(s.index.map(lambda x: x.year)):
years_list.append(year)
rets_list.append(rets.values)
return years_list, rets_list
def compute_annual_returns(timestamps: np.ndarray, returns: np.ndarray, periods_per_year: float) -> Tuple[np.ndarray, np.ndarray]:
'''Groups returns by calendar year and returns the annualized return for each year
Returns:
A tuple with the first element being an array of years (integer) and the second element
an array of annualized returns for those years
'''
assert(len(timestamps) == len(returns) and periods_per_year > 0)
if not len(timestamps): return np.array([], dtype=np.str), np.array([], dtype=np.float)
df = | pd.DataFrame({'ret': returns, 'timestamp': timestamps}) | pandas.DataFrame |
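# Illustrative sketch of the per-year aggregation compute_annual_returns performs: group
# returns by calendar year and compound them into one figure per year. The real function
# also annualizes using periods_per_year, which this sketch skips; dates and returns
# here are invented.
def _annual_returns_sketch():
    import pandas as pd
    timestamps = pd.to_datetime(['2018-12-30', '2018-12-31', '2019-01-02', '2019-01-03'])
    returns = pd.Series([0.01, -0.02, 0.03, 0.01], index=timestamps)
    annual = returns.groupby(returns.index.year).apply(lambda r: (1 + r).prod() - 1)
    return annual  # 2018: ~-0.0102, 2019: ~0.0403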
import pandas as pd
# import matplotlib.pyplot as plt
# import seaborn as sns
import numpy as np
# import copy
# from scipy.stats import norm
# from sklearn import preprocessing
fileName = '/home/kazim/Desktop/projects/IE490/input/tubitak_data2_processesed2.csv'
df = pd.read_csv(fileName, sep = ',')
#preview data
df_prev = df
### Ilce 79
# df.drop(df.index[df.ilce_kod != 79], inplace=True)
# # df
# df.drop('ilce_kod', axis=1, inplace=True)
#
# df.info()
mahalle = df["mahalle_kod"]
ilce = df["ilce_kod"]
# df['mahalle_kod'].describe()
# we can drop 'yasal_burut_alani' as its correlation with 'mevcut_alan' is almost 1
df = df.drop('yasal_burut_alani', axis=1)
# df_pre = copy.deepcopy(df)
### One Hot Encoding for Categorical Variables
df = | pd.get_dummies(df, columns=["ilce_kod"]) | pandas.get_dummies |
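# Illustrative sketch of the two preprocessing steps above: drop one of a pair of
# near-perfectly correlated columns, then one-hot encode a categorical code. Column
# names and values are invented stand-ins for the Tubitak data.
def _preprocessing_sketch():
    import pandas as pd
    df = pd.DataFrame({
        'mevcut_alan': [100.0, 150.0, 200.0, 120.0],
        'yasal_burut_alani': [101.0, 149.0, 202.0, 119.0],
        'ilce_kod': [79, 34, 79, 34],
    })
    corr = df[['mevcut_alan', 'yasal_burut_alani']].corr().iloc[0, 1]
    if corr > 0.95:  # keep only one of the two nearly identical area columns
        df = df.drop('yasal_burut_alani', axis=1)
    return pd.get_dummies(df, columns=['ilce_kod'])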
from contextlib import contextmanager
import struct
import tracemalloc
import numpy as np
import pytest
from pandas._libs import hashtable as ht
import pandas as pd
import pandas._testing as tm
from pandas.core.algorithms import isin
@contextmanager
def activated_tracemalloc():
tracemalloc.start()
try:
yield
finally:
tracemalloc.stop()
def get_allocated_khash_memory():
snapshot = tracemalloc.take_snapshot()
snapshot = snapshot.filter_traces(
(tracemalloc.DomainFilter(True, ht.get_hashtable_trace_domain()),)
)
return sum(map(lambda x: x.size, snapshot.traces))
@pytest.mark.parametrize(
"table_type, dtype",
[
(ht.PyObjectHashTable, np.object_),
(ht.Complex128HashTable, np.complex128),
(ht.Int64HashTable, np.int64),
(ht.UInt64HashTable, np.uint64),
(ht.Float64HashTable, np.float64),
(ht.Complex64HashTable, np.complex64),
(ht.Int32HashTable, np.int32),
(ht.UInt32HashTable, np.uint32),
(ht.Float32HashTable, np.float32),
(ht.Int16HashTable, np.int16),
(ht.UInt16HashTable, np.uint16),
(ht.Int8HashTable, np.int8),
(ht.UInt8HashTable, np.uint8),
(ht.IntpHashTable, np.intp),
],
)
class TestHashTable:
def test_get_set_contains_len(self, table_type, dtype):
index = 5
table = table_type(55)
assert len(table) == 0
assert index not in table
table.set_item(index, 42)
assert len(table) == 1
assert index in table
assert table.get_item(index) == 42
table.set_item(index + 1, 41)
assert index in table
assert index + 1 in table
assert len(table) == 2
assert table.get_item(index) == 42
assert table.get_item(index + 1) == 41
table.set_item(index, 21)
assert index in table
assert index + 1 in table
assert len(table) == 2
assert table.get_item(index) == 21
assert table.get_item(index + 1) == 41
assert index + 2 not in table
with pytest.raises(KeyError, match=str(index + 2)):
table.get_item(index + 2)
def test_map_keys_to_values(self, table_type, dtype, writable):
# only Int64HashTable has this method
if table_type == ht.Int64HashTable:
N = 77
table = table_type()
keys = np.arange(N).astype(dtype)
vals = np.arange(N).astype(np.int64) + N
keys.flags.writeable = writable
vals.flags.writeable = writable
table.map_keys_to_values(keys, vals)
for i in range(N):
assert table.get_item(keys[i]) == i + N
def test_map_locations(self, table_type, dtype, writable):
N = 8
table = table_type()
keys = (np.arange(N) + N).astype(dtype)
keys.flags.writeable = writable
table.map_locations(keys)
for i in range(N):
assert table.get_item(keys[i]) == i
def test_lookup(self, table_type, dtype, writable):
N = 3
table = table_type()
keys = (np.arange(N) + N).astype(dtype)
keys.flags.writeable = writable
table.map_locations(keys)
result = table.lookup(keys)
expected = np.arange(N)
tm.assert_numpy_array_equal(result.astype(np.int64), expected.astype(np.int64))
def test_lookup_wrong(self, table_type, dtype):
if dtype in (np.int8, np.uint8):
N = 100
else:
N = 512
table = table_type()
keys = (np.arange(N) + N).astype(dtype)
table.map_locations(keys)
wrong_keys = np.arange(N).astype(dtype)
result = table.lookup(wrong_keys)
assert np.all(result == -1)
def test_unique(self, table_type, dtype, writable):
if dtype in (np.int8, np.uint8):
N = 88
else:
N = 1000
table = table_type()
expected = (np.arange(N) + N).astype(dtype)
keys = np.repeat(expected, 5)
keys.flags.writeable = writable
unique = table.unique(keys)
tm.assert_numpy_array_equal(unique, expected)
def test_tracemalloc_works(self, table_type, dtype):
if dtype in (np.int8, np.uint8):
N = 256
else:
N = 30000
keys = np.arange(N).astype(dtype)
with activated_tracemalloc():
table = table_type()
table.map_locations(keys)
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
def test_tracemalloc_for_empty(self, table_type, dtype):
with activated_tracemalloc():
table = table_type()
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
def test_get_state(self, table_type, dtype):
table = table_type(1000)
state = table.get_state()
assert state["size"] == 0
assert state["n_occupied"] == 0
assert "n_buckets" in state
assert "upper_bound" in state
@pytest.mark.parametrize("N", range(1, 110))
def test_no_reallocation(self, table_type, dtype, N):
keys = np.arange(N).astype(dtype)
preallocated_table = table_type(N)
n_buckets_start = preallocated_table.get_state()["n_buckets"]
preallocated_table.map_locations(keys)
n_buckets_end = preallocated_table.get_state()["n_buckets"]
# original number of buckets was enough:
assert n_buckets_start == n_buckets_end
# check with clean table (not too much preallocated)
clean_table = table_type()
clean_table.map_locations(keys)
assert n_buckets_start == clean_table.get_state()["n_buckets"]
class TestHashTableUnsorted:
    # TODO: moved from test_algos; there may be redundancies with other tests
def test_string_hashtable_set_item_signature(self):
# GH#30419 fix typing in StringHashTable.set_item to prevent segfault
tbl = ht.StringHashTable()
tbl.set_item("key", 1)
assert tbl.get_item("key") == 1
with pytest.raises(TypeError, match="'key' has incorrect type"):
# key arg typed as string, not object
tbl.set_item(4, 6)
with pytest.raises(TypeError, match="'val' has incorrect type"):
tbl.get_item(4)
def test_lookup_nan(self, writable):
# GH#21688 ensure we can deal with readonly memory views
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
xs.setflags(write=writable)
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
def test_add_signed_zeros(self):
# GH#21866 inconsistent hash-function for float64
# default hash-function would lead to different hash-buckets
# for 0.0 and -0.0 if there are more than 2^30 hash-buckets
# but this would mean 16GB
N = 4 # 12 * 10**8 would trigger the error, if you have enough memory
m = ht.Float64HashTable(N)
m.set_item(0.0, 0)
m.set_item(-0.0, 0)
assert len(m) == 1 # 0.0 and -0.0 are equivalent
def test_add_different_nans(self):
# GH#21866 inconsistent hash-function for float64
# create different nans from bit-patterns:
NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
assert NAN1 != NAN1
assert NAN2 != NAN2
# default hash function would lead to different hash-buckets
# for NAN1 and NAN2 even if there are only 4 buckets:
m = ht.Float64HashTable()
m.set_item(NAN1, 0)
m.set_item(NAN2, 0)
assert len(m) == 1 # NAN1 and NAN2 are equivalent
def test_lookup_overflow(self, writable):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
# GH 21688 ensure we can deal with readonly memory views
xs.setflags(write=writable)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
@pytest.mark.parametrize("nvals", [0, 10]) # resizing to 0 is special case
@pytest.mark.parametrize(
"htable, uniques, dtype, safely_resizes",
[
(ht.PyObjectHashTable, ht.ObjectVector, "object", False),
(ht.StringHashTable, ht.ObjectVector, "object", True),
(ht.Float64HashTable, ht.Float64Vector, "float64", False),
(ht.Int64HashTable, ht.Int64Vector, "int64", False),
(ht.Int32HashTable, ht.Int32Vector, "int32", False),
(ht.UInt64HashTable, ht.UInt64Vector, "uint64", False),
],
)
def test_vector_resize(
self, writable, htable, uniques, dtype, safely_resizes, nvals
):
# Test for memory errors after internal vector
# reallocations (GH 7157)
        # Changed from np.random.rand to range, since random values
        # could cause flaky CI failures when safely_resizes=False
vals = np.array(range(1000), dtype=dtype)
# GH 21688 ensures we can deal with read-only memory views
vals.setflags(write=writable)
# initialise instances; cannot initialise in parametrization,
# as otherwise external views would be held on the array (which is
# one of the things this test is checking)
htable = htable()
uniques = uniques()
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() sets an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (except for StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError, match="external reference.*"):
htable.get_labels(vals, uniques, 0, -1)
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
@pytest.mark.parametrize(
"hashtable",
[
ht.PyObjectHashTable,
ht.StringHashTable,
ht.Float64HashTable,
ht.Int64HashTable,
ht.Int32HashTable,
ht.UInt64HashTable,
],
)
def test_hashtable_large_sizehint(self, hashtable):
# GH#22729 smoketest for not raising when passing a large size_hint
size_hint = np.iinfo(np.uint32).max + 1
hashtable(size_hint=size_hint)
class TestPyObjectHashTableWithNans:
def test_nan_float(self):
nan1 = float("nan")
nan2 = float("nan")
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
def test_nan_complex_both(self):
nan1 = complex(float("nan"), float("nan"))
nan2 = complex(float("nan"), float("nan"))
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
def test_nan_complex_real(self):
nan1 = complex(float("nan"), 1)
nan2 = complex(float("nan"), 1)
other = complex(float("nan"), 2)
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
with pytest.raises(KeyError, match=None) as error:
table.get_item(other)
assert str(error.value) == str(other)
def test_nan_complex_imag(self):
nan1 = complex(1, float("nan"))
nan2 = complex(1, float("nan"))
other = complex(2, float("nan"))
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
with pytest.raises(KeyError, match=None) as error:
table.get_item(other)
assert str(error.value) == str(other)
def test_nan_in_tuple(self):
nan1 = (float("nan"),)
nan2 = (float("nan"),)
assert nan1[0] is not nan2[0]
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
def test_nan_in_nested_tuple(self):
nan1 = (1, (2, (float("nan"),)))
nan2 = (1, (2, (float("nan"),)))
other = (1, 2)
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
with pytest.raises(KeyError, match=None) as error:
table.get_item(other)
assert str(error.value) == str(other)
def test_hash_equal_tuple_with_nans():
a = (float("nan"), (float("nan"), float("nan")))
b = (float("nan"), (float("nan"), float("nan")))
assert ht.object_hash(a) == ht.object_hash(b)
assert ht.objects_are_equal(a, b)
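# Note: plain Python equality treats these tuples as distinct because NaN != NaN;
# ht.object_hash / ht.objects_are_equal intentionally collapse NaNs so that hashtable lookups
# (as exercised by the PyObjectHashTable tests above) stay consistent.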
def test_get_labels_groupby_for_Int64(writable):
table = ht.Int64HashTable()
vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
vals.flags.writeable = writable
arr, unique = table.get_labels_groupby(vals)
expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.intp)
expected_unique = np.array([1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(arr, expected_arr)
tm.assert_numpy_array_equal(unique, expected_unique)
def test_tracemalloc_works_for_StringHashTable():
N = 1000
keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)
with activated_tracemalloc():
table = ht.StringHashTable()
table.map_locations(keys)
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
def test_tracemalloc_for_empty_StringHashTable():
with activated_tracemalloc():
table = ht.StringHashTable()
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
@pytest.mark.parametrize("N", range(1, 110))
def test_no_reallocation_StringHashTable(N):
keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)
preallocated_table = ht.StringHashTable(N)
n_buckets_start = preallocated_table.get_state()["n_buckets"]
preallocated_table.map_locations(keys)
n_buckets_end = preallocated_table.get_state()["n_buckets"]
# original number of buckets was enough:
assert n_buckets_start == n_buckets_end
# check with clean table (not too much preallocated)
clean_table = ht.StringHashTable()
clean_table.map_locations(keys)
assert n_buckets_start == clean_table.get_state()["n_buckets"]
@pytest.mark.parametrize(
"table_type, dtype",
[
(ht.Float64HashTable, np.float64),
(ht.Float32HashTable, np.float32),
(ht.Complex128HashTable, np.complex128),
(ht.Complex64HashTable, np.complex64),
],
)
class TestHashTableWithNans:
def test_get_set_contains_len(self, table_type, dtype):
index = float("nan")
table = table_type()
assert index not in table
table.set_item(index, 42)
assert len(table) == 1
assert index in table
assert table.get_item(index) == 42
table.set_item(index, 41)
assert len(table) == 1
assert index in table
assert table.get_item(index) == 41
def test_map_locations(self, table_type, dtype):
N = 10
table = table_type()
keys = np.full(N, np.nan, dtype=dtype)
table.map_locations(keys)
assert len(table) == 1
assert table.get_item(np.nan) == N - 1
def test_unique(self, table_type, dtype):
N = 1020
table = table_type()
keys = np.full(N, np.nan, dtype=dtype)
unique = table.unique(keys)
assert np.all(np.isnan(unique)) and len(unique) == 1
def test_unique_for_nan_objects_floats():
table = ht.PyObjectHashTable()
keys = np.array([float("nan") for i in range(50)], dtype=np.object_)
unique = table.unique(keys)
assert len(unique) == 1
def test_unique_for_nan_objects_complex():
table = ht.PyObjectHashTable()
keys = np.array([complex(float("nan"), 1.0) for i in range(50)], dtype=np.object_)
unique = table.unique(keys)
assert len(unique) == 1
def test_unique_for_nan_objects_tuple():
table = ht.PyObjectHashTable()
keys = np.array(
[1] + [(1.0, (float("nan"), 1.0)) for i in range(50)], dtype=np.object_
)
unique = table.unique(keys)
assert len(unique) == 2
@pytest.mark.parametrize(
"dtype",
[
np.object_,
np.complex128,
np.int64,
np.uint64,
np.float64,
np.complex64,
np.int32,
np.uint32,
np.float32,
np.int16,
np.uint16,
np.int8,
np.uint8,
np.intp,
],
)
class TestHelpFunctions:
def test_value_count(self, dtype, writable):
N = 43
expected = (np.arange(N) + N).astype(dtype)
values = np.repeat(expected, 5)
values.flags.writeable = writable
keys, counts = ht.value_count(values, False)
tm.assert_numpy_array_equal(np.sort(keys), expected)
assert np.all(counts == 5)
def test_value_count_stable(self, dtype, writable):
# GH12679
values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
values.flags.writeable = writable
keys, counts = ht.value_count(values, False)
tm.assert_numpy_array_equal(keys, values)
assert np.all(counts == 1)
def test_duplicated_first(self, dtype, writable):
N = 100
values = np.repeat(np.arange(N).astype(dtype), 5)
values.flags.writeable = writable
result = ht.duplicated(values)
expected = np.ones_like(values, dtype=np.bool_)
expected[::5] = False
tm.assert_numpy_array_equal(result, expected)
def test_ismember_yes(self, dtype, writable):
N = 127
arr = np.arange(N).astype(dtype)
values = np.arange(N).astype(dtype)
arr.flags.writeable = writable
values.flags.writeable = writable
result = ht.ismember(arr, values)
expected = np.ones_like(values, dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_ismember_no(self, dtype):
N = 17
arr = np.arange(N).astype(dtype)
values = (np.arange(N) + N).astype(dtype)
result = ht.ismember(arr, values)
expected = np.zeros_like(values, dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_mode(self, dtype, writable):
if dtype in (np.int8, np.uint8):
N = 53
else:
N = 11111
values = np.repeat(np.arange(N).astype(dtype), 5)
values[0] = 42
values.flags.writeable = writable
result = ht.mode(values, False)
assert result == 42
def test_mode_stable(self, dtype, writable):
values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
values.flags.writeable = writable
keys = | ht.mode(values, False) | pandas._libs.hashtable.mode |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
import powerlaw
import sys
from matplotlib.ticker import MaxNLocator
from data import prepare_data
from synthetic_data import SyntheticGraphGenerator
from basic_algorithms import get_connected_components, compute_diameter, compute_girth, compute_degree_sequence
print(sys.getrecursionlimit())
sys.setrecursionlimit(11000)
print(sys.getrecursionlimit())
def distribution_of_connected_components(graph, name=''):
cc = get_connected_components(graph)
# Count connected components sizes
cc_sizes = list(map(lambda comp: len(comp), cc))
# Convert to dataframe for easier plotting
cc_sizes = pd.DataFrame(cc_sizes, columns=['size'])
# Aggregate by size
cc_sizes = cc_sizes['size'].value_counts().sort_index()
# Determine if graph has a `big component`
sizes = cc_sizes.iloc[-2:].index.tolist()
counts = cc_sizes.iloc[-2:].tolist()
big_component = False
if sizes[-1] > len(graph.nodes) / 100 and counts[-1] == 1: # a single big component
big_component = True
if len(sizes) == 2 and sizes[-2] > np.log(len(graph.nodes)) / 2:
big_component = False
# Plot count for unique sizes
cc_sizes.plot(kind='bar', rot=0)
plt.title(f'Connected components size distribution\n{name}')
plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
BIG = mpatches.Patch(color='green', label='Big component')
MEH = mpatches.Patch(color='red', label='Not big enough')
plt.legend(handles=[BIG, MEH])
if big_component:
plt.gca().get_xticklabels()[-1].set_color("green")
else:
plt.gca().get_xticklabels()[-1].set_color("red")
plt.xlabel('Size')
plt.ylabel('Count')
plt.tight_layout()
plt.show()
def scale_free_classification(graph, name):
degree = compute_degree_sequence(graph)
degree_sequence = sorted(degree.values())
results = powerlaw.Fit(degree_sequence, xmin=1, discrete=True)
R, p = results.distribution_compare('power_law', 'lognormal')
is_scale_free = R > 0
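    # Per the powerlaw package, R is the log-likelihood ratio of the two candidate fits:
    # R > 0 favors the power-law over the lognormal, and p gives the significance of that
    # comparison, hence the R > 0 test above.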
# fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(5, 3))
fig = plt.figure(figsize=(17, 6.5), dpi=100)
ax = fig.add_subplot(111, frame_on=False)
ax.tick_params(labelcolor="none", bottom=False, left=False)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
df = | pd.DataFrame(degree_sequence, columns=['degree']) | pandas.DataFrame |
import pandas as pd
from time import sleep
import csv
from datetime import datetime
import time
ra= 10
Entree420mA = '4-20mA'
EntréeTension = '0-20V'
EntréeAutre = 'Autre'
BrandAdafruit = 'Adafruit'
ProductRefVMSB = 'VM-SB'
sleepmillisecond=0.1
sleepsecond=1
sleep10second=10
sleep30second=30
sleepminut=60
sleephour=3600
now = datetime.now()
# DataFrames for Sensor 1 on CAN Bus 1 for Sensor Box 1
dfC1SB1S1 = | pd.DataFrame(columns=['Type','Value','Input Type','Product Ref','Brand','Name','Date']) | pandas.DataFrame |
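# Sketch of how a single reading could be appended to the frame above (hypothetical values;
# column order taken from the definition above):
#   dfC1SB1S1.loc[len(dfC1SB1S1)] = ['Current', 12.5, Entree420mA, ProductRefVMSB,
#                                    BrandAdafruit, 'Sensor 1', datetime.now()]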
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
# not shape compatible
with pytest.raises(ValueError):
df == (2, 2)
with pytest.raises(ValueError):
df == [2, 2]
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
with pytest.raises(TypeError):
df.__eq__(None)
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH#15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH#15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('timestamps', [
[pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
[pd.Timestamp('2012-01-01 13:00:00')] * 2])
def test_tz_aware_scalar_comparison(self, timestamps):
# Test for issue #15966
df = pd.DataFrame({'test': timestamps})
expected = pd.DataFrame({'test': [False, False]})
tm.assert_frame_equal(df == -1, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_flex_filled_mixed_dtypes(self):
# GH#19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
tm.assert_frame_equal(result, expected)
class TestFrameMulDiv(object):
"""Tests for DataFrame multiplication and division"""
# ------------------------------------------------------------------
# Mod By Zero
def test_df_mod_zero_df(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = | pd.Series([0, 0, 0, 0], dtype='float64') | pandas.Series |
import pandas as pd
from pathlib import Path
import numpy as np
import glob
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as mse, mean_absolute_error as mae
from scipy.fft import fft, ifft
### Lale dependencies
import lale
from lale.lib.lale import NoOp, Hyperopt
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.preprocessing import StandardScaler as Standard
from xgboost import XGBRegressor as XGB
from lale.lib.sklearn import GradientBoostingRegressor as GradBoost, ExtraTreesRegressor, KNeighborsRegressor as KNN
from lale.lib.lightgbm import LGBMRegressor as LGBM
lale.wrap_imported_operators()
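# lale.wrap_imported_operators() rebinds the estimators imported above (sklearn, XGBoost,
# LightGBM) to Lale wrappers so they can be composed into pipelines and tuned with Hyperopt;
# this is why plain sklearn classes can be mixed with lale.lib operators below.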
def read_file(fname):
    # the structure for oxygen, temp, and adcp is the same, so
# just read the file for each into df
df = | pd.read_csv(fname) | pandas.read_csv |
from keras.layers import Input, Dense, concatenate
from keras.layers.recurrent import GRU
from keras.utils import plot_model
from keras.models import Model, load_model
from keras.callbacks import ModelCheckpoint
import keras
import pandas as pd
import numpy as np
import keras.backend as K
from keras.utils import to_categorical
from keras.losses import categorical_crossentropy
from multiprocessing import Pool, cpu_count
import pickle
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
dataset = "cb12/"
path = "../../data/"
interim_path = path + dataset + "interim/"
processed_path = path + dataset + "processed/"
model_path = "models/"
model_path_valid = "models/valid/"
def TOP1(y_true, y_pred):
y1 = y_pred * y_true
y2 = K.sum(y1, axis=1)[:, np.newaxis]
y3 = y_true - y1
return (K.sum(K.sigmoid(y_pred - y2)) + y3 * y3) / tf.cast(tf.shape(y_true)[0], tf.float32)
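# TOP1 is the pairwise ranking loss from the GRU4Rec paper: for each target item it pushes the
# scores of the other (negative) items in the batch below the target's score and adds a
# regularization term on the negative scores; the implementation above approximates it per batch.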
loss = TOP1
def create_prnn_model(left_input_size, right_input_size, batch_size = 512, hidden_units = 100, o_activation='softmax', lr = 0.001):
emb_size = 50
size = emb_size
# left input - item vector
input_left = Input(batch_shape=(batch_size, 1, left_input_size), name='input_left')
gru_left, gru_left_states = GRU(hidden_units, stateful=True, return_state=True, name='gru_left')(input_left)
# right input - feature vector
input_right = Input(batch_shape=(batch_size, 1, right_input_size), name='input_right')
gru_right, gru_right_states = GRU(hidden_units, stateful=True, return_state=True, name='gru_right')(input_right)
# merging both layers and creating the model
merged = concatenate([gru_left, gru_right])
    #change softmax to another activation function?
output = Dense(left_input_size, activation=o_activation, name='output')(merged)
model = Model(inputs=[input_left, input_right], outputs=output, name='gru4rec')
encoder = Model(inputs=[input_left, input_right], outputs=merged)
# define model's optimizer
#optimizer = optim.Optimizer(optimizer=self.optimizer, lr=self.lr)
#opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
opt = keras.optimizers.Adagrad(lr=lr)
# define model's loss function --> implement here the top1 loss function
# loss_function = loss.LossFunction(loss_type=self.loss_function)
#model.compile(loss=loss_function, optimizer=opt, metrics=['accuracy'])
model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
filepath = model_path_valid + 'prnn_cb12_checkpoint.h5'
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=2, save_best_only=True, mode='min')
callbacks_list = []
model.summary()
#plot_model(model, show_shapes=True, to_file='rnn-structure.png')
return model, encoder
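# Instantiation sketch (the sizes are assumptions; they depend on the item vocabulary and the
# feature encoding used below):
#   model, encoder = create_prnn_model(left_input_size=n_items, right_input_size=feature_size,
#                                      batch_size=512, hidden_units=100)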
def get_states(model):
#return the actual states of the layers
return [K.get_value(s) for s,_ in model.state_updates]
def freeze_layer(model, layer_name, lr):
if layer_name == 'gru_left':
# gru left layer will not be trained this mini batch
model.get_layer(layer_name).trainable = False
# but gru right will
model.get_layer('gru_right').trainable = True
elif layer_name == 'gru_right':
# gru right layer will not be trained this mini batch
model.get_layer(layer_name).trainable = False
# but gru left will
model.get_layer('gru_left').trainable = True
else:
raise NotImplementedError
# opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
opt = keras.optimizers.Adagrad(lr=lr)
model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
return model
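# Alternating freezes implement the p-RNN "alternating" training scheme: only one of the two
# GRU subnets is updated per mini-batch while the other stays frozen, which is why the model
# is re-compiled after toggling `trainable` (train_prnn below switches on i % 2).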
class SessionDataset:
"""Credit to yhs-968/pyGRU4REC."""
def __init__(self, data, sep='\t', session_key='session_id', item_key='item_id', time_key='created_at', n_samples=-1, itemmap=None, time_sort=False):
"""
Args:
path: path of the csv file
sep: separator for the csv
session_key, item_key, time_key: name of the fields corresponding to the sessions, items, time
n_samples: the number of samples to use. If -1, use the whole dataset.
itemmap: mapping between item IDs and item indices
time_sort: whether to sort the sessions by time or not
"""
self.df = data
self.session_key = session_key
self.item_key = item_key
self.time_key = time_key
self.time_sort = time_sort
self.add_item_indices(itemmap=itemmap)
self.df.sort_values([session_key, time_key], inplace=True)
        # Sort the df by session ID first and then by time, so that clicks belonging to the same
        # session sit next to each other and are time-ordered within the session.
self.click_offsets = self.get_click_offsets()
#array of the positions where there is a change of session.
#len = len(session_idx_arr) + 1
self.session_idx_arr = self.order_session_idx()
#array of sessions [0 1 2 3 4 .... n-1]
def get_click_offsets(self):
"""
Return the offsets of the beginning clicks of each session IDs,
where the offset is calculated against the first click of the first session ID.
"""
offsets = np.zeros(self.df[self.session_key].nunique() + 1, dtype=np.int32)
# group & sort the df by session_key and get the offset values
offsets[1:] = self.df.groupby(self.session_key).size().cumsum()
return offsets
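    # Example for get_click_offsets (sketch): for three sessions of lengths 2, 3 and 1 the
    # offsets are [0, 2, 5, 6], so session i spans rows offsets[i]:offsets[i + 1] of the sorted df.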
def order_session_idx(self):
""" Order the session indices """
if self.time_sort:
# starting time for each sessions, sorted by session IDs
sessions_start_time = self.df.groupby(self.session_key)[self.time_key].min().values
# order the session indices by session starting times
session_idx_arr = np.argsort(sessions_start_time)
else:
session_idx_arr = np.arange(self.df[self.session_key].nunique())
return session_idx_arr
def add_item_indices(self, itemmap=None):
"""
Add item index column named "item_idx" to the df
Args:
itemmap (pd.DataFrame): mapping between the item Ids and indices
"""
if itemmap is None:
item_ids = self.df[self.item_key].unique() # unique item ids
item2idx = pd.Series(data=np.arange(len(item_ids)),
index=item_ids)
itemmap = pd.DataFrame({self.item_key:item_ids,
'item_idx':item2idx[item_ids].values})
self.itemmap = itemmap
self.df = pd.merge(self.df, self.itemmap, on=self.item_key, how='inner')
@property
def items(self):
return self.itemmap.item_id.unique()
class SessionDataLoader:
"""Credit to yhs-968/pyGRU4REC."""
def __init__(self, dataset, batch_size):
"""
A class for creating session-parallel mini-batches.
Args:
dataset (SessionDataset): the session dataset to generate the batches from
batch_size (int): size of the batch
"""
self.dataset = dataset
self.batch_size = batch_size
self.done_sessions_counter = 0
def __iter__(self):
""" Returns the iterator for producing session-parallel training mini-batches.
Yields:
input (B,): Item indices that will be encoded as one-hot vectors later.
target (B,): a Variable that stores the target item indices
masks: Numpy array indicating the positions of the sessions to be terminated
"""
df = self.dataset.df
session_key='session_id'
item_key='item_id'
time_key='created_at'
self.n_items = df[item_key].nunique()
click_offsets = self.dataset.click_offsets
#print(click_offsets)
session_idx_arr = self.dataset.session_idx_arr
#print(session_idx_arr)
iters = np.arange(self.batch_size)
#iters = np.arange(1)
maxiter = iters.max()
start = click_offsets[session_idx_arr[iters]]
end = click_offsets[session_idx_arr[iters] + 1]
#print(start)
#print(end)
mask = [] # indicator for the sessions to be terminated
finished = False
while not finished:
#minimum lenght of all the sessions
minlen = (end - start).min()
# Item indices (for embedding) for clicks where the first sessions start
idx_target = df.item_idx.values[start]
for i in range(minlen - 1):
# Build inputs & targets
idx_input = idx_target
idx_target = df.item_idx.values[start + i + 1]
inp = idx_input
target = idx_target
yield inp, target, mask
# click indices where a particular session meets second-to-last element
start = start + (minlen - 1)
# see if how many sessions should terminate
mask = np.arange(len(iters))[(end - start) <= 1]
self.done_sessions_counter = len(mask)
for idx in mask:
maxiter += 1
if maxiter >= len(click_offsets) - 1:
finished = True
break
# update the next starting/ending point
iters[idx] = maxiter
start[idx] = click_offsets[session_idx_arr[maxiter]]
end[idx] = click_offsets[session_idx_arr[maxiter] + 1]
def train_prnn(model, lr, loader, layer_freezing_enabled = False, num_epochs = 10):
for epoch in range(0, num_epochs):
print("Epoch: " + str(epoch+1))
epoch_loss = 0
i = 0
for feat, target, mask in loader:
#feat = np array size BATCH_SIZE with the item indexes of the first items of the first BATCH_SIZE sessions
            #convert feat to an array of size (BATCH_SIZE, 26723) by one-hot encoding the indices with loader.n_items
input_oh = to_categorical(feat, num_classes=loader.n_items)
#convert from shape (BATCH_SIZE, 26723) to (BATCH_SIZE, 1, 26723)
input_oh = np.expand_dims(input_oh, axis=1)
# with the argmax function you get back again the feat/target np array (arg_input = feat)
### arg_input = np.argmax(to_categorical(feat, num_classes=loader.n_items), axis=1)
### arg_output = np.argmax(to_categorical(target, num_classes=loader.n_items), axis=1)
input_feature = np.array([])
for line in feat:
#result = int(mapitem[(mapitem.item_idx == line)].item_id.values)
result = str(mapitem[(mapitem.item_idx == line)].item_id.values[0])
#print(result)
# use empty feature vec if missing
feature_vector = empty_feature_vec
if result in item_encodings.keys():
feature_vector = item_encodings[result]
input_feature = np.append(input_feature, feature_vector)
input_feature = input_feature.reshape(batch_size, 1, feature_size)
#target = np array size BATCH_SIZE with the item indexes of the TARGET items of the feat array items
target_oh = to_categorical(target, num_classes=loader.n_items)
#calculate the loss between the input and the expected output
if layer_freezing_enabled:
                if i % 2 == 0:
model = freeze_layer(model, 'gru_left', lr = lr)
else:
model = freeze_layer(model, 'gru_right', lr = lr)
tr_loss = model.train_on_batch([input_oh, input_feature], target_oh)
epoch_loss += tr_loss[0]
i = i + 1
print("Epoch loss: " + str(epoch_loss))
return model
# # Set data for final training
# set data
train_path = '../../data/' + dataset + 'processed/train_14d.csv'
train = pd.read_csv(train_path, sep='\t')[['session_id', 'item_id', 'created_at']]
interactions = | pd.read_csv('../../data/' + dataset + 'interim/interactions.csv', header=0, sep='\t') | pandas.read_csv |
from typing import Dict, Optional, Union, cast
import numpy as np
import pandas as pd
from fseval.pipeline.estimator import Estimator
from fseval.types import AbstractEstimator, AbstractMetric, Callback
class UploadFeatureImportances(AbstractMetric):
def _build_table(self, feature_vector: np.ndarray):
"""Takes a feature importances vector of type (n_features) or
(n_classes, n_features)."""
# feature_vector is of form (n_features)
if feature_vector.ndim == 1:
indices = np.arange(1, len(feature_vector) + 1)
df = pd.DataFrame(
{
"feature_importances": feature_vector,
"feature_index": indices,
"class": None,
}
)
return df
# feature_vector is of form (n_classes, n_features)
elif feature_vector.ndim == 2:
df = pd.DataFrame()
for class_index, feature_vector_class in enumerate(feature_vector):
indices = np.arange(1, len(feature_vector_class) + 1)
df_class = pd.DataFrame(
{
"feature_importances": feature_vector_class,
"feature_index": indices,
"class": class_index,
}
)
df = df.append(df_class)
return df
else:
raise ValueError(
"`feature_importances` must be either 1- or 2-dimensional."
)
def _normalize_feature_importances(self, feature_importances: np.ndarray):
"""Normalized feature importances. The summation of the importances
vector is always 1."""
feature_importances = np.asarray(feature_importances)
# get ranker feature importances, check whether all components > 0
assert not (feature_importances < 0).any(), (
"Estimated or ground-truth feature importances must be strictly positive."
+ " Some feature importance scores were negative."
)
# normalize
if feature_importances.ndim == 1:
feature_importances = feature_importances / sum(feature_importances)
elif feature_importances.ndim == 2:
feature_importances_rowsum = feature_importances.sum(axis=1, keepdims=True)
feature_importances = feature_importances / feature_importances_rowsum
else:
raise ValueError(
"`feature_importances` must be either 1- or 2-dimensional."
)
return feature_importances
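    # Example for _normalize_feature_importances (sketch): a 1-D input [2, 1, 1] becomes
    # [0.5, 0.25, 0.25]; for a 2-D (n_classes, n_features) input each row is normalized to sum to 1.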
def score_ranking(
self,
scores: Union[Dict, pd.DataFrame],
ranker: AbstractEstimator,
bootstrap_state: int,
callbacks: Callback,
feature_importances: Optional[np.ndarray] = None,
) -> Union[Dict, pd.DataFrame]:
ranker = cast(Estimator, ranker)
table = | pd.DataFrame() | pandas.DataFrame |
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import locan.data.metadata_pb2
from locan import ROOT_DIR, LocData
from locan.dependencies import HAS_DEPENDENCY
from locan.locan_io.locdata.io_locdata import load_rapidSTORM_file, load_txt_file
logger = logging.getLogger(__name__)
for key, value in HAS_DEPENDENCY.items():
if not value:
logger.info(f"Extra dependency {key} is not available.")
# register pytest markers - should be in sync with pyproject.toml
def pytest_configure(config):
config.addinivalue_line(
"markers", "gui: marks tests that require user interaction (skipped by default)"
)
config.addinivalue_line(
"markers",
"visual: marks tests that require visual inspection (skipped by default)",
)
config.addinivalue_line(
"markers",
"requires_datasets: marks tests that require data in locan_datasets directory (skipped by default)",
)
# fixtures for random points
@pytest.fixture(scope="session")
def few_random_points():
points = np.array(
[
[0.066, 0.64],
[0.92, 0.65],
[0.11, 0.40],
[0.20, 0.17],
[0.75, 0.92],
[0.01, 0.12],
[0.23, 0.54],
[0.05, 0.25],
[0.70, 0.73],
[0.43, 0.16],
]
)
return points
# fixtures for LocData objects
@pytest.fixture(scope="session")
def locdata_empty():
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import re
import os
import gc
import glob
import keras
import numbers
import tldextract
import numpy as np
import pandas as pd
from tqdm import tqdm
import tensorflow as tf
from itertools import chain
from keras.models import Model
from keras.models import load_model
import matplotlib.pyplot as plt
from collections import Counter
from sklearn import preprocessing
from gensim.models import FastText
from sklearn.decomposition import PCA
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from keras.layers import Input, Embedding, LSTM, Dense, Bidirectional, Dropout
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
import warnings
warnings.filterwarnings("ignore")
# Initializing tqdm for pandas
tqdm.pandas()
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
print([x.name for x in local_device_protos if x.device_type == 'GPU'])
np.random.seed(0)
# Get the kinds of ids associated with each tuple
def update_ids(x):
kinds_of_ids = set()
for item in x:
kinds_of_ids.add(item[0])
return kinds_of_ids
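# Example (sketch, hypothetical tuples): update_ids([('doi', '10.1000/x'), ('isbn', '123')])
# returns {'doi', 'isbn'}; only the kind of identifier is kept, not its value.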
##################################################################################################
# ## Newspaper data ##
##################################################################################################
## Get the top 150 sections which we got from training the 2.7 million citations
largest_sections = pd.read_csv('/dlabdata1/harshdee/largest_sections.csv', header=None)
largest_sections.rename({0: 'section_name', 1: 'count'}, axis=1, inplace=True)
original_tag_counts = | pd.read_csv('/dlabdata1/harshdee/tag_counts.csv', header=None) | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.errors import ParserWarning
import pandas._libs.lib as lib
from pandas.core.dtypes.common import is_list_like
from collections import OrderedDict
from modin.db_conn import ModinDatabaseConnection, UnsupportedDatabaseException
from modin.config import TestDatasetSize, Engine, StorageFormat, IsExperimental
from modin.utils import to_pandas
from modin.pandas.utils import from_arrow
import pyarrow as pa
import os
import sys
import shutil
import sqlalchemy as sa
import csv
import tempfile
from .utils import (
check_file_leaks,
df_equals,
json_short_string,
json_short_bytes,
json_long_string,
json_long_bytes,
get_unique_filename,
io_ops_bad_exc,
eval_io_from_str,
dummy_decorator,
create_test_dfs,
COMP_TO_EXT,
teardown_test_file,
teardown_test_files,
generate_dataframe,
)
if StorageFormat.get() == "Omnisci":
from modin.experimental.core.execution.native.implementations.omnisci_on_native.test.utils import (
eval_io,
align_datetime_dtypes,
)
else:
from .utils import eval_io
if StorageFormat.get() == "Pandas":
import modin.pandas as pd
else:
import modin.experimental.pandas as pd
try:
import ray
EXCEPTIONS = (ray.exceptions.WorkerCrashedError,)
except ImportError:
EXCEPTIONS = ()
from modin.config import NPartitions
NPartitions.put(4)
DATASET_SIZE_DICT = {
"Small": 64,
"Normal": 2000,
"Big": 20000,
}
# Number of rows in the test file
NROWS = DATASET_SIZE_DICT.get(TestDatasetSize.get(), DATASET_SIZE_DICT["Small"])
TEST_DATA = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
def assert_files_eq(path1, path2):
with open(path1, "rb") as file1, open(path2, "rb") as file2:
file1_content = file1.read()
file2_content = file2.read()
if file1_content == file2_content:
return True
else:
return False
def setup_clipboard(row_size=NROWS):
df = pandas.DataFrame({"col1": np.arange(row_size), "col2": np.arange(row_size)})
df.to_clipboard()
def parquet_eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs):
"""
Helper function to test `to_parquet` method.
Parameters
----------
modin_obj : pd.DataFrame
A Modin DataFrame or a Series to test `to_parquet` method.
pandas_obj: pandas.DataFrame
A pandas DataFrame or a Series to test `to_parquet` method.
fn : str
Name of the method, that should be tested.
extension : str
Extension of the test file.
"""
unique_filename_modin = get_unique_filename(extension=extension)
unique_filename_pandas = get_unique_filename(extension=extension)
try:
getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs)
getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs)
pandas_df = pandas.read_parquet(unique_filename_pandas)
modin_df = pd.read_parquet(unique_filename_modin)
df_equals(pandas_df, modin_df)
finally:
teardown_test_file(unique_filename_pandas)
try:
teardown_test_file(unique_filename_modin)
except IsADirectoryError:
shutil.rmtree(unique_filename_modin)
def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs):
"""Helper function to test `to_<extension>` methods.
Args:
modin_obj: Modin DataFrame or Series to test `to_<extension>` method.
pandas_obj: Pandas DataFrame or Series to test `to_<extension>` method.
fn: name of the method, that should be tested.
extension: Extension of the test file.
"""
unique_filename_modin = get_unique_filename(extension=extension)
unique_filename_pandas = get_unique_filename(extension=extension)
try:
# parameter `max_retries=0` is set for `to_csv` function on Ray engine,
# in order to increase the stability of tests, we repeat the call of
# the entire function manually
last_exception = None
for _ in range(3):
try:
getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs)
except EXCEPTIONS as exc:
last_exception = exc
continue
break
else:
raise last_exception
getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs)
assert assert_files_eq(unique_filename_modin, unique_filename_pandas)
finally:
teardown_test_files([unique_filename_modin, unique_filename_pandas])
@pytest.mark.usefixtures("TestReadCSVFixture")
@pytest.mark.skipif(
IsExperimental.get() and StorageFormat.get() == "Pyarrow",
    reason="Segmentation fault; see PR #2347 for details",
)
class TestCsv:
# delimiter tests
@pytest.mark.parametrize("sep", [None, "_", ",", ".", "\n"])
@pytest.mark.parametrize("delimiter", ["_", ",", ".", "\n"])
@pytest.mark.parametrize("decimal", [".", "_"])
@pytest.mark.parametrize("thousands", [None, ",", "_", " "])
def test_read_csv_delimiters(
self, make_csv_file, sep, delimiter, decimal, thousands
):
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename,
delimiter=delimiter,
thousands_separator=thousands,
decimal_separator=decimal,
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
delimiter=delimiter,
sep=sep,
decimal=decimal,
thousands=thousands,
)
# Column and Index Locations and Names tests
@pytest.mark.parametrize("header", ["infer", None, 0])
@pytest.mark.parametrize("index_col", [None, "col1"])
@pytest.mark.parametrize("prefix", [None, "_", "col"])
@pytest.mark.parametrize(
"names", [lib.no_default, ["col1"], ["c1", "c2", "c3", "c4", "c5", "c6", "c7"]]
)
@pytest.mark.parametrize(
"usecols", [None, ["col1"], ["col1", "col2", "col6"], [0, 1, 5]]
)
@pytest.mark.parametrize("skip_blank_lines", [True, False])
def test_read_csv_col_handling(
self,
header,
index_col,
prefix,
names,
usecols,
skip_blank_lines,
):
if names is lib.no_default:
            pytest.skip("some parameter combinations fail: issue #2312")
if header in ["infer", None] and names is not lib.no_default:
pytest.skip(
"Heterogeneous data in a column is not cast to a common type: issue #3346"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_blank_lines"],
header=header,
index_col=index_col,
prefix=prefix,
names=names,
usecols=usecols,
skip_blank_lines=skip_blank_lines,
)
@pytest.mark.parametrize("usecols", [lambda col_name: col_name in ["a", "b", "e"]])
def test_from_csv_with_callable_usecols(self, usecols):
fname = "modin/pandas/test/data/test_usecols.csv"
pandas_df = pandas.read_csv(fname, usecols=usecols)
modin_df = pd.read_csv(fname, usecols=usecols)
df_equals(modin_df, pandas_df)
# General Parsing Configuration
@pytest.mark.parametrize("dtype", [None, True])
@pytest.mark.parametrize("engine", [None, "python", "c"])
@pytest.mark.parametrize(
"converters",
[
None,
{
"col1": lambda x: np.int64(x) * 10,
"col2": pandas.to_datetime,
"col4": lambda x: x.replace(":", ";"),
},
],
)
@pytest.mark.parametrize("skipfooter", [0, 10])
def test_read_csv_parsing_1(
self,
dtype,
engine,
converters,
skipfooter,
):
if dtype:
dtype = {
col: "object"
for col in pandas.read_csv(
pytest.csvs_names["test_read_csv_regular"], nrows=1
).columns
}
eval_io(
fn_name="read_csv",
check_exception_type=None, # issue #2320
raising_exceptions=None,
check_kwargs_callable=not callable(converters),
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
dtype=dtype,
engine=engine,
converters=converters,
skipfooter=skipfooter,
)
@pytest.mark.parametrize("header", ["infer", None, 0])
@pytest.mark.parametrize(
"skiprows",
[
2,
lambda x: x % 2,
lambda x: x > 25,
lambda x: x > 128,
np.arange(10, 50),
np.arange(10, 50, 2),
],
)
@pytest.mark.parametrize("nrows", [35, None])
@pytest.mark.parametrize(
"names",
[
[f"c{col_number}" for col_number in range(4)],
[f"c{col_number}" for col_number in range(6)],
None,
],
)
@pytest.mark.parametrize("encoding", ["latin1", "windows-1251", None])
def test_read_csv_parsing_2(
self,
make_csv_file,
request,
header,
skiprows,
nrows,
names,
encoding,
):
xfail_case = (
StorageFormat.get() == "Omnisci"
and header is not None
and isinstance(skiprows, int)
and names is None
and nrows is None
)
if xfail_case:
pytest.xfail(
"read_csv fails because of duplicated columns names - issue #3080"
)
if request.config.getoption(
"--simulate-cloud"
).lower() != "off" and is_list_like(skiprows):
pytest.xfail(
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340"
)
if encoding:
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename,
encoding=encoding,
)
kwargs = {
"filepath_or_buffer": unique_filename
if encoding
else pytest.csvs_names["test_read_csv_regular"],
"header": header,
"skiprows": skiprows,
"nrows": nrows,
"names": names,
"encoding": encoding,
}
if Engine.get() != "Python":
df = pandas.read_csv(**dict(kwargs, nrows=1))
# in that case first partition will contain str
if df[df.columns[0]][df.index[0]] in ["c1", "col1", "c3", "col3"]:
pytest.xfail("read_csv incorrect output with float data - issue #2634")
eval_io(
fn_name="read_csv",
check_exception_type=None, # issue #2320
raising_exceptions=None,
check_kwargs_callable=not callable(skiprows),
# read_csv kwargs
**kwargs,
)
@pytest.mark.parametrize("true_values", [["Yes"], ["Yes", "true"], None])
@pytest.mark.parametrize("false_values", [["No"], ["No", "false"], None])
@pytest.mark.parametrize("skipfooter", [0, 10])
@pytest.mark.parametrize("nrows", [35, None])
def test_read_csv_parsing_3(
self,
true_values,
false_values,
skipfooter,
nrows,
):
xfail_case = (
(false_values or true_values)
and Engine.get() != "Python"
and StorageFormat.get() != "Omnisci"
)
if xfail_case:
pytest.xfail("modin and pandas dataframes differs - issue #2446")
eval_io(
fn_name="read_csv",
check_exception_type=None, # issue #2320
raising_exceptions=None,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_yes_no"],
true_values=true_values,
false_values=false_values,
skipfooter=skipfooter,
nrows=nrows,
)
def test_read_csv_skipinitialspace(self):
unique_filename = get_unique_filename()
str_initial_spaces = (
"col1,col2,col3,col4\n"
"five, six, seven, eight\n"
" five, six, seven, eight\n"
"five, six, seven, eight\n"
)
eval_io_from_str(str_initial_spaces, unique_filename, skipinitialspace=True)
@pytest.mark.parametrize(
"test_case",
["single_element", "single_column", "multiple_columns"],
)
def test_read_csv_squeeze(self, request, test_case):
if request.config.getoption("--simulate-cloud").lower() != "off":
pytest.xfail(
reason="Error EOFError: stream has been closed in `modin in the cloud` mode - issue #3329"
)
unique_filename = get_unique_filename()
str_single_element = "1"
str_single_col = "1\n2\n3\n"
str_four_cols = "1, 2, 3, 4\n5, 6, 7, 8\n9, 10, 11, 12\n"
case_to_data = {
"single_element": str_single_element,
"single_column": str_single_col,
"multiple_columns": str_four_cols,
}
eval_io_from_str(case_to_data[test_case], unique_filename, squeeze=True)
eval_io_from_str(
case_to_data[test_case], unique_filename, header=None, squeeze=True
)
def test_read_csv_mangle_dupe_cols(self):
if StorageFormat.get() == "Omnisci":
pytest.xfail(
"processing of duplicated columns in OmniSci storage format is not supported yet - issue #3080"
)
unique_filename = get_unique_filename()
str_non_unique_cols = "col,col,col,col\n5, 6, 7, 8\n9, 10, 11, 12\n"
eval_io_from_str(str_non_unique_cols, unique_filename, mangle_dupe_cols=True)
# NA and Missing Data Handling tests
@pytest.mark.parametrize("na_values", ["custom_nan", "73"])
@pytest.mark.parametrize("keep_default_na", [True, False])
@pytest.mark.parametrize("na_filter", [True, False])
@pytest.mark.parametrize("verbose", [True, False])
@pytest.mark.parametrize("skip_blank_lines", [True, False])
def test_read_csv_nans_handling(
self,
na_values,
keep_default_na,
na_filter,
verbose,
skip_blank_lines,
):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_nans"],
na_values=na_values,
keep_default_na=keep_default_na,
na_filter=na_filter,
verbose=verbose,
skip_blank_lines=skip_blank_lines,
)
# Datetime Handling tests
@pytest.mark.parametrize(
"parse_dates", [True, False, ["col2"], ["col2", "col4"], [1, 3]]
)
@pytest.mark.parametrize("infer_datetime_format", [True, False])
@pytest.mark.parametrize("keep_date_col", [True, False])
@pytest.mark.parametrize(
"date_parser", [None, lambda x: pandas.datetime.strptime(x, "%Y-%m-%d")]
)
@pytest.mark.parametrize("dayfirst", [True, False])
@pytest.mark.parametrize("cache_dates", [True, False])
def test_read_csv_datetime(
self,
parse_dates,
infer_datetime_format,
keep_date_col,
date_parser,
dayfirst,
cache_dates,
):
if (
StorageFormat.get() == "Omnisci"
and isinstance(parse_dates, list)
and ("col4" in parse_dates or 3 in parse_dates)
):
pytest.xfail(
"In some cases read_csv with `parse_dates` with OmniSci storage format outputs incorrect result - issue #3081"
)
raising_exceptions = io_ops_bad_exc # default value
if isinstance(parse_dates, dict) and callable(date_parser):
            # In this case a TypeError is raised: <lambda>() takes 1 positional argument but 2 were given
raising_exceptions = list(io_ops_bad_exc)
raising_exceptions.remove(TypeError)
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(date_parser),
raising_exceptions=raising_exceptions,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
parse_dates=parse_dates,
infer_datetime_format=infer_datetime_format,
keep_date_col=keep_date_col,
date_parser=date_parser,
dayfirst=dayfirst,
cache_dates=cache_dates,
)
# Iteration tests
@pytest.mark.parametrize("iterator", [True, False])
def test_read_csv_iteration(self, iterator):
filename = pytest.csvs_names["test_read_csv_regular"]
# Tests __next__ and correctness of reader as an iterator
# Use larger chunksize to read through file quicker
rdf_reader = pd.read_csv(filename, chunksize=500, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=500, iterator=iterator)
for modin_df, pd_df in zip(rdf_reader, pd_reader):
df_equals(modin_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_csv(filename, chunksize=1, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=1, iterator=iterator)
modin_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
df_equals(modin_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_csv(filename, chunksize=1, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=1, iterator=iterator)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
df_equals(modin_df, pd_df)
def test_read_csv_encoding_976(self):
file_name = "modin/pandas/test/data/issue_976.csv"
names = [str(i) for i in range(11)]
kwargs = {
"sep": ";",
"names": names,
"encoding": "windows-1251",
}
df1 = pd.read_csv(file_name, **kwargs)
df2 = pandas.read_csv(file_name, **kwargs)
# these columns contain data of various types in partitions
# see #1931 for details;
df1 = df1.drop(["4", "5"], axis=1)
df2 = df2.drop(["4", "5"], axis=1)
df_equals(df1, df2)
# Quoting, Compression parameters tests
@pytest.mark.parametrize("compression", ["infer", "gzip", "bz2", "xz", "zip"])
@pytest.mark.parametrize("encoding", [None, "latin8", "utf16"])
@pytest.mark.parametrize("engine", [None, "python", "c"])
def test_read_csv_compression(self, make_csv_file, compression, encoding, engine):
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename, encoding=encoding, compression=compression
)
compressed_file_path = (
f"{unique_filename}.{COMP_TO_EXT[compression]}"
if compression != "infer"
else unique_filename
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=compressed_file_path,
compression=compression,
encoding=encoding,
engine=engine,
)
@pytest.mark.parametrize(
"encoding",
[
None,
"ISO-8859-1",
"latin1",
"iso-8859-1",
"cp1252",
"utf8",
pytest.param(
"unicode_escape",
                marks=pytest.mark.skipif(
                    sys.version_info < (3, 9),
reason="https://bugs.python.org/issue45461",
),
),
"raw_unicode_escape",
"utf_16_le",
"utf_16_be",
"utf32",
"utf_32_le",
"utf_32_be",
"utf-8-sig",
],
)
def test_read_csv_encoding(self, make_csv_file, encoding):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, encoding=encoding)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
encoding=encoding,
)
@pytest.mark.parametrize("thousands", [None, ",", "_", " "])
@pytest.mark.parametrize("decimal", [".", "_"])
@pytest.mark.parametrize("lineterminator", [None, "x", "\n"])
@pytest.mark.parametrize("escapechar", [None, "d", "x"])
@pytest.mark.parametrize("dialect", ["test_csv_dialect", None])
def test_read_csv_file_format(
self,
make_csv_file,
thousands,
decimal,
lineterminator,
escapechar,
dialect,
):
if Engine.get() != "Python" and lineterminator == "x":
pytest.xfail("read_csv with Ray engine outputs empty frame - issue #2493")
elif Engine.get() != "Python" and escapechar:
pytest.xfail(
"read_csv with Ray engine fails with some 'escapechar' parameters - issue #2494"
)
elif Engine.get() != "Python" and dialect:
pytest.xfail(
"read_csv with Ray engine fails with `dialect` parameter - issue #2508"
)
unique_filename = get_unique_filename()
if dialect:
test_csv_dialect_params = {
"delimiter": "_",
"doublequote": False,
"escapechar": "\\",
"quotechar": "d",
"quoting": csv.QUOTE_ALL,
}
csv.register_dialect(dialect, **test_csv_dialect_params)
dialect = csv.get_dialect(dialect)
make_csv_file(filename=unique_filename, **test_csv_dialect_params)
else:
make_csv_file(
filename=unique_filename,
thousands_separator=thousands,
decimal_separator=decimal,
escapechar=escapechar,
line_terminator=lineterminator,
)
eval_io(
check_exception_type=None, # issue #2320
raising_exceptions=None,
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
thousands=thousands,
decimal=decimal,
lineterminator=lineterminator,
escapechar=escapechar,
dialect=dialect,
)
@pytest.mark.parametrize(
"quoting",
[csv.QUOTE_ALL, csv.QUOTE_MINIMAL, csv.QUOTE_NONNUMERIC, csv.QUOTE_NONE],
)
@pytest.mark.parametrize("quotechar", ['"', "_", "d"])
@pytest.mark.parametrize("doublequote", [True, False])
@pytest.mark.parametrize("comment", [None, "#", "x"])
def test_read_csv_quoting(
self,
make_csv_file,
quoting,
quotechar,
doublequote,
comment,
):
        # in these cases escapechar should be set, otherwise an error occurs:
        # _csv.Error: need to escape, but no escapechar set
use_escapechar = (
not doublequote and quotechar != '"' and quoting != csv.QUOTE_NONE
)
escapechar = "\\" if use_escapechar else None
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename,
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
comment_col_char=comment,
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
comment=comment,
)
# Error Handling parameters tests
@pytest.mark.parametrize("warn_bad_lines", [True, False, None])
@pytest.mark.parametrize("error_bad_lines", [True, False, None])
@pytest.mark.parametrize("on_bad_lines", ["error", "warn", "skip", None])
def test_read_csv_error_handling(
self,
warn_bad_lines,
error_bad_lines,
on_bad_lines,
):
# In this case exceptions are raised by both Modin and pandas,
# so the tests pass.
raise_exception_case = on_bad_lines is not None and (
error_bad_lines is not None or warn_bad_lines is not None
)
if (
not raise_exception_case
and Engine.get() not in ["Python", "Cloudpython"]
and StorageFormat.get() != "Omnisci"
):
pytest.xfail("read_csv doesn't raise `bad lines` exceptions - issue #2500")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_bad_lines"],
warn_bad_lines=warn_bad_lines,
error_bad_lines=error_bad_lines,
on_bad_lines=on_bad_lines,
)
# Internal parameters tests
@pytest.mark.parametrize("use_str_data", [True, False])
@pytest.mark.parametrize("engine", [None, "python", "c"])
@pytest.mark.parametrize("delimiter", [",", " "])
@pytest.mark.parametrize("delim_whitespace", [True, False])
@pytest.mark.parametrize("low_memory", [True, False])
@pytest.mark.parametrize("memory_map", [True, False])
@pytest.mark.parametrize("float_precision", [None, "high", "round_trip"])
def test_read_csv_internal(
self,
make_csv_file,
use_str_data,
engine,
delimiter,
delim_whitespace,
low_memory,
memory_map,
float_precision,
):
# In this case a "TypeError: cannot use a string pattern on a bytes-like object" is raised,
# so TypeError should be excluded from the raising_exceptions list in order to check that
# the same exceptions are raised by pandas and Modin.
case_with_TypeError_exc = (
engine == "python"
and delimiter == ","
and delim_whitespace
and low_memory
and memory_map
and float_precision is None
)
raising_exceptions = io_ops_bad_exc # default value
if case_with_TypeError_exc:
raising_exceptions = list(io_ops_bad_exc)
raising_exceptions.remove(TypeError)
kwargs = {
"engine": engine,
"delimiter": delimiter,
"delim_whitespace": delim_whitespace,
"low_memory": low_memory,
"memory_map": memory_map,
"float_precision": float_precision,
}
unique_filename = get_unique_filename()
if use_str_data:
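# Use a small whitespace-delimited CSV given as a string instead of a generated file.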
str_delim_whitespaces = (
"col1 col2 col3 col4\n5 6 7 8\n9 10 11 12\n"
)
eval_io_from_str(
str_delim_whitespaces,
unique_filename,
raising_exceptions=raising_exceptions,
**kwargs,
)
else:
make_csv_file(
filename=unique_filename,
delimiter=delimiter,
)
eval_io(
filepath_or_buffer=unique_filename,
fn_name="read_csv",
raising_exceptions=raising_exceptions,
**kwargs,
)
# Issue related, specific or corner cases
@pytest.mark.parametrize("nrows", [2, None])
def test_read_csv_bad_quotes(self, nrows):
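# The last field ("eight) is missing its closing quote, exercising the parser's error handling.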
csv_bad_quotes = (
'1, 2, 3, 4\none, two, three, four\nfive, "six", seven, "eight\n'
)
unique_filename = get_unique_filename()
eval_io_from_str(csv_bad_quotes, unique_filename, nrows=nrows)
def test_read_csv_categories(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/test_categories.csv",
names=["one", "two"],
dtype={"one": "int64", "two": "category"},
)
@pytest.mark.parametrize("encoding", [None, "utf-8"])
@pytest.mark.parametrize("encoding_errors", ["strict", "ignore"])
@pytest.mark.parametrize("parse_dates", [False, ["timestamp"]])
@pytest.mark.parametrize("index_col", [None, 0, 2])
@pytest.mark.parametrize("header", ["infer", 0])
@pytest.mark.parametrize(
"names",
[
None,
["timestamp", "symbol", "high", "low", "open", "close", "spread", "volume"],
],
)
def test_read_csv_parse_dates(
self, names, header, index_col, parse_dates, encoding, encoding_errors
):
if names is not None and header == "infer":
pytest.xfail(
"read_csv with Ray engine works incorrectly with date data and names parameter provided - issue #2509"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/test_time_parsing.csv",
names=names,
header=header,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
encoding_errors=encoding_errors,
)
@pytest.mark.parametrize(
"storage_options",
[{"anon": False}, {"anon": True}, {"key": "123", "secret": "123"}, None],
)
def test_read_csv_s3(self, storage_options):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="s3://noaa-ghcn-pds/csv/1788.csv",
storage_options=storage_options,
)
@pytest.mark.parametrize("names", [list("XYZ"), None])
@pytest.mark.parametrize("skiprows", [1, 2, 3, 4, None])
def test_read_csv_skiprows_names(self, names, skiprows):
if StorageFormat.get() == "Omnisci" and names is None and skiprows in [1, None]:
# If these conditions are satisfied, column names will be inferred
# from the first row, which contains duplicated values that are
# not supported by the `Omnisci` storage format yet.
pytest.xfail(
"processing of duplicated columns in OmniSci storage format is not supported yet - issue #3080"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/issue_2239.csv",
names=names,
skiprows=skiprows,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_read_csv_default_to_pandas(self):
with pytest.warns(UserWarning):
# This tests that we default to pandas on a buffer
from io import StringIO
pd.read_csv(
StringIO(open(pytest.csvs_names["test_read_csv_regular"], "r").read())
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_read_csv_default_to_pandas_url(self):
# We haven't implemented read_csv from https, but if it's implemented, then this needs to change
eval_io(
fn_name="read_csv",
modin_warning=UserWarning,
# read_csv kwargs
filepath_or_buffer="https://raw.githubusercontent.com/modin-project/modin/master/modin/pandas/test/data/blah.csv",
# It takes about ~17 GB of RAM for Omnisci to import the whole table from this test
# because it has too many (~1000) string columns. Take a subset of columns
# so this test can run on low-RAM machines.
usecols=[0, 1, 2, 3] if StorageFormat.get() == "Omnisci" else None,
)
@pytest.mark.parametrize("nrows", [21, 5, None])
@pytest.mark.parametrize("skiprows", [4, 1, 500, None])
def test_read_csv_newlines_in_quotes(self, nrows, skiprows):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/newlines.csv",
nrows=nrows,
skiprows=skiprows,
cast_to_str=StorageFormat.get() != "Omnisci",
)
def test_read_csv_sep_none(self):
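# sep=None makes pandas sniff the delimiter with the Python engine, which emits a ParserWarning.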
eval_io(
fn_name="read_csv",
modin_warning=ParserWarning,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
sep=None,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_read_csv_incorrect_data(self):
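# Pass a JSON file to read_csv and check that Modin and pandas handle the malformed input the same way.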
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/test_categories.json",
)
@pytest.mark.parametrize(
"kwargs",
[
{"names": [5, 1, 3, 4, 2, 6]},
{"names": [0]},
{"names": None, "usecols": [1, 0, 2]},
{"names": [3, 1, 2, 5], "usecols": [4, 1, 3, 2]},
],
)
def test_read_csv_names_neq_num_cols(self, kwargs):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/issue_2074.csv",
**kwargs,
)
def test_read_csv_wrong_path(self):
raising_exceptions = [e for e in io_ops_bad_exc if e != FileNotFoundError]
eval_io(
fn_name="read_csv",
raising_exceptions=raising_exceptions,
# read_csv kwargs
filepath_or_buffer="/some/wrong/path.csv",
)
@pytest.mark.skipif(
StorageFormat.get() == "Omnisci",
reason="to_csv is not implemented with OmniSci storage format yet - issue #3082",
)
@pytest.mark.parametrize("header", [False, True])
@pytest.mark.parametrize("mode", ["w", "wb+"])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_to_csv(self, header, mode):
pandas_df = generate_dataframe()
modin_df = pd.DataFrame(pandas_df)
eval_to_file(
modin_obj=modin_df,
pandas_obj=pandas_df,
fn="to_csv",
extension="csv",
header=header,
mode=mode,
)
@pytest.mark.skipif(
StorageFormat.get() == "Omnisci",
reason="to_csv is not implemented with OmniSci storage format yet - issue #3082",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_dataframe_to_csv(self):
pandas_df = pandas.read_csv(pytest.csvs_names["test_read_csv_regular"])
modin_df = pd.DataFrame(pandas_df)
eval_to_file(
modin_obj=modin_df, pandas_obj=pandas_df, fn="to_csv", extension="csv"
)
@pytest.mark.skipif(
StorageFormat.get() == "Omnisci",
reason="to_csv is not implemented with OmniSci storage format yet - issue #3082",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_series_to_csv(self):
pandas_s = pandas.read_csv(
pytest.csvs_names["test_read_csv_regular"], usecols=["col1"]
).squeeze()
modin_s = pd.Series(pandas_s)
eval_to_file(
modin_obj=modin_s, pandas_obj=pandas_s, fn="to_csv", extension="csv"
)
def test_read_csv_within_decorator(self):
@dummy_decorator()
def wrapped_read_csv(file, method):
if method == "pandas":
return pandas.read_csv(file)
if method == "modin":
return pd.read_csv(file)
pandas_df = wrapped_read_csv(
pytest.csvs_names["test_read_csv_regular"], method="pandas"
)
modin_df = wrapped_read_csv(
pytest.csvs_names["test_read_csv_regular"], method="modin"
)
if StorageFormat.get() == "Omnisci":
# Align datetime dtypes to work around a bug related to the `parse_dates` parameter:
# https://github.com/modin-project/modin/issues/3485
modin_df, pandas_df = align_datetime_dtypes(modin_df, pandas_df)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize(
"read_mode",
[
"r",
pytest.param(
"rb",
marks=pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="Cannot pickle file handles. See comments in PR #2625",
),
),
],
)
def test_read_csv_file_handle(self, read_mode, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename)
with open(unique_filename, mode=read_mode) as buffer:
df_pandas = pandas.read_csv(buffer)
buffer.seek(0)
df_modin = pd.read_csv(buffer)
df_equals(df_modin, df_pandas)
def test_unnamed_index(self):
def get_internal_df(df):
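# Return the pandas frame held by the first (top-left) partition of the Modin frame.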
partition = df._query_compiler._modin_frame._partitions[0][0]
return partition.to_pandas()
path = "modin/pandas/test/data/issue_3119.csv"
read_df = pd.read_csv(path, index_col=0)
assert get_internal_df(read_df).index.name is None
read_df = pd.read_csv(path, index_col=[0, 1])
for name1, name2 in zip(get_internal_df(read_df).index.names, [None, "a"]):
assert name1 == name2
def test_read_csv_empty_frame(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
usecols=["col1"],
index_col="col1",
)
@pytest.mark.parametrize(
"skiprows",
[
lambda x: x > 20,
lambda x: True,
lambda x: x in [10, 20],
pytest.param(
lambda x: x << 10,
marks=pytest.mark.skipif(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
),
),
],
)
def test_read_csv_skiprows_corner_cases(self, skiprows):
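# skiprows is given as a callable predicate over row numbers, which read_csv applies per row.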
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(skiprows),
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
skiprows=skiprows,
)
class TestTable:
def test_read_table(self, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, delimiter="\t")
eval_io(
fn_name="read_table",
# read_table kwargs
filepath_or_buffer=unique_filename,
)
def test_read_table_within_decorator(self, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, delimiter="\t")
@dummy_decorator()
def wrapped_read_table(file, method):
if method == "pandas":
return pandas.read_table(file)
if method == "modin":
return pd.read_table(file)
pandas_df = wrapped_read_table(unique_filename, method="pandas")
modin_df = wrapped_read_table(unique_filename, method="modin")
df_equals(modin_df, pandas_df)
def test_read_table_empty_frame(self, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, delimiter="\t")
eval_io(
fn_name="read_table",
# read_table kwargs
filepath_or_buffer=unique_filename,
usecols=["col1"],
index_col="col1",
)
class TestParquet:
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet(self, make_parquet_file, columns):
unique_filename = get_unique_filename(extension="parquet")
make_parquet_file(filename=unique_filename)
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path=unique_filename,
columns=columns,
)
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_directory(self, make_parquet_file, columns):
unique_filename = get_unique_filename(extension=None)
make_parquet_file(filename=unique_filename, directory=True)
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path=unique_filename,
columns=columns,
)
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_partitioned_directory(self, make_parquet_file, columns):
unique_filename = get_unique_filename(extension=None)
make_parquet_file(filename=unique_filename, partitioned_columns=["col1"])
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path=unique_filename,
columns=columns,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_pandas_index(self):
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
unique_filename = get_unique_filename(extension="parquet")
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 100_000, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
try:
pandas_df.set_index("idx").to_parquet(unique_filename)
# read the same parquet using modin.pandas
df_equals(
pd.read_parquet(unique_filename), pandas.read_parquet(unique_filename)
)
pandas_df.set_index(["idx", "A"]).to_parquet(unique_filename)
df_equals(
pd.read_parquet(unique_filename), pandas.read_parquet(unique_filename)
)
finally:
os.remove(unique_filename)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_pandas_index_partitioned(self):
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
unique_filename = get_unique_filename(extension="parquet")
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 10, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
try:
pandas_df.set_index("idx").to_parquet(unique_filename, partition_cols=["A"])
# read the same parquet using modin.pandas
df_equals(
pd.read_parquet(unique_filename), pandas.read_parquet(unique_filename)
)
finally:
shutil.rmtree(unique_filename)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_hdfs(self):
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path="modin/pandas/test/data/hdfs.parquet",
)
@pytest.mark.parametrize("path_type", ["url", "object"])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_s3(self, path_type):
dataset_url = "s3://modin-datasets/testing/test_data.parquet"
if path_type == "object":
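# Open the dataset with s3fs and pass the file object to read_parquet instead of the URL.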
import s3fs
fs = s3fs.S3FileSystem(anon=True)
with fs.open(dataset_url, "rb") as file_obj:
eval_io("read_parquet", path=file_obj)
else:
eval_io("read_parquet", path=dataset_url, storage_options={"anon": True})
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_without_metadata(self):
"""Test that Modin can read parquet files not written by pandas."""
from pyarrow import csv
from pyarrow import parquet
parquet_fname = get_unique_filename(extension="parquet")
csv_fname = get_unique_filename(extension="csv")
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 10, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
try:
pandas_df.to_csv(csv_fname, index=False)
# read into pyarrow table and write it to a parquet file
t = csv.read_csv(csv_fname)
parquet.write_table(t, parquet_fname)
df_equals(
pd.read_parquet(parquet_fname), pandas.read_parquet(parquet_fname)
)
finally:
teardown_test_files([parquet_fname, csv_fname])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_to_parquet(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
parquet_eval_to_file(
modin_obj=modin_df,
pandas_obj=pandas_df,
fn="to_parquet",
extension="parquet",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_2462(self):
test_df = pandas.DataFrame({"col1": [["ad_1", "ad_2"], ["ad_3"]]})
with tempfile.TemporaryDirectory() as directory:
path = f"{directory}/data"
os.makedirs(path)
test_df.to_parquet(path + "/part-00000.parquet")
read_df = pd.read_parquet(path)
df_equals(test_df, read_df)
class TestJson:
@pytest.mark.parametrize("lines", [False, True])
def test_read_json(self, make_json_file, lines):
eval_io(
fn_name="read_json",
# read_json kwargs
path_or_buf=make_json_file(lines=lines),
lines=lines,
)
@pytest.mark.parametrize(
"storage_options",
[{"anon": False}, {"anon": True}, {"key": "123", "secret": "123"}, None],
)
def test_read_json_s3(self, storage_options):
eval_io(
fn_name="read_json",
path_or_buf="s3://modin-datasets/testing/test_data.json",
lines=True,
orient="records",
storage_options=storage_options,
)
def test_read_json_categories(self):
eval_io(
fn_name="read_json",
# read_json kwargs
path_or_buf="modin/pandas/test/data/test_categories.json",
dtype={"one": "int64", "two": "category"},
)
@pytest.mark.parametrize(
"data",
[json_short_string, json_short_bytes, json_long_string, json_long_bytes],
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_json_string_bytes(self, data):
with pytest.warns(UserWarning):
modin_df = pd.read_json(data)
# For I/O objects we need to rewind to reuse the same object.
if hasattr(data, "seek"):
data.seek(0)
df_equals(modin_df, pandas.read_json(data))
def test_to_json(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
eval_to_file(
modin_obj=modin_df, pandas_obj=pandas_df, fn="to_json", extension="json"
)
@pytest.mark.parametrize(
"read_mode",
[
"r",
pytest.param(
"rb",
marks=pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="Cannot pickle file handles. See comments in PR #2625",
),
),
],
)
def test_read_json_file_handle(self, make_json_file, read_mode):
with open(make_json_file(), mode=read_mode) as buf:
df_pandas = pandas.read_json(buf)
buf.seek(0)
df_modin = pd.read_json(buf)
df_equals(df_pandas, df_modin)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_json_metadata(self, make_json_file):
# `lines=True` triggers the Modin implementation;
# `orient="records"` must be set when `lines=True`.
df = pd.read_json(
make_json_file(ncols=80, lines=True), lines=True, orient="records"
)
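# Compare the cached column widths against the actual number of columns in each column partition.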
parts_width_cached = df._query_compiler._modin_frame._column_widths_cache
num_splits = len(df._query_compiler._modin_frame._partitions[0])
parts_width_actual = [
len(df._query_compiler._modin_frame._partitions[0][i].get().columns)
for i in range(num_splits)
]
assert parts_width_cached == parts_width_actual
class TestExcel:
@check_file_leaks
def test_read_excel(self, make_excel_file):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io=make_excel_file(),
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_engine(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=UserWarning,
# read_excel kwargs
io=make_excel_file(),
engine="openpyxl",
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_index_col(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=UserWarning,
# read_excel kwargs
io=make_excel_file(),
index_col=0,
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_all_sheets(self, make_excel_file):
unique_filename = make_excel_file()
pandas_df = pandas.read_excel(unique_filename, sheet_name=None)
modin_df = pd.read_excel(unique_filename, sheet_name=None)
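# With sheet_name=None, read_excel returns a dict (OrderedDict in older pandas) mapping sheet names to DataFrames.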
assert isinstance(pandas_df, (OrderedDict, dict))
assert isinstance(modin_df, type(pandas_df))
assert pandas_df.keys() == modin_df.keys()
for key in pandas_df.keys():
df_equals(modin_df.get(key), pandas_df.get(key))
@pytest.mark.xfail(
Engine.get() != "Python",
reason="pandas throws the exception. See pandas issue #39250 for more info",
)
@check_file_leaks
def test_read_excel_sheetname_title(self):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io="modin/pandas/test/data/excel_sheetname_title.xlsx",
)
@check_file_leaks
def test_excel_empty_line(self):
path = "modin/pandas/test/data/test_emptyline.xlsx"
modin_df = pd.read_excel(path)
assert str(modin_df)
@pytest.mark.parametrize(
"sheet_name",
[
"Sheet1",
"AnotherSpecialName",
"SpecialName",
"SecondSpecialName",
0,
1,
2,
3,
],
)
@check_file_leaks
def test_read_excel_sheet_name(self, sheet_name):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io="modin/pandas/test/data/modin_error_book.xlsx",
sheet_name=sheet_name,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="TypeError: Expected list, got type - issue #3284",
)
def test_ExcelFile(self, make_excel_file):
unique_filename = make_excel_file()
modin_excel_file = pd.ExcelFile(unique_filename)
pandas_excel_file = pandas.ExcelFile(unique_filename)
try:
df_equals(modin_excel_file.parse(), pandas_excel_file.parse())
assert modin_excel_file.io == unique_filename
assert isinstance(modin_excel_file, pd.ExcelFile)
finally:
modin_excel_file.close()
pandas_excel_file.close()
@pytest.mark.xfail(strict=False, reason="Flaky test, defaults to pandas")
def test_to_excel(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
unique_filename_modin = get_unique_filename(extension="xlsx")
unique_filename_pandas = get_unique_filename(extension="xlsx")
modin_writer = pandas.ExcelWriter(unique_filename_modin)
pandas_writer = pandas.ExcelWriter(unique_filename_pandas)
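# Both frames are written through pandas.ExcelWriter objects and the resulting files are compared byte-wise.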
try:
modin_df.to_excel(modin_writer)
pandas_df.to_excel(pandas_writer)
modin_writer.save()
pandas_writer.save()
assert assert_files_eq(unique_filename_modin, unique_filename_pandas)
finally:
teardown_test_files([unique_filename_modin, unique_filename_pandas])
@pytest.mark.xfail(
Engine.get() != "Python", reason="Test fails because of issue 3305"
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_empty_frame(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=UserWarning,
# read_excel kwargs
io=make_excel_file(),
usecols=[0],
index_col=0,
)
class TestHdf:
@pytest.mark.parametrize("format", [None, "table"])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_hdf(self, make_hdf_file, format):
eval_io(
fn_name="read_hdf",
# read_hdf kwargs
path_or_buf=make_hdf_file(format=format),
key="df",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_HDFStore(self):
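# Round-trip a frame through pd.HDFStore / pandas.HDFStore, re-read both files with
# pandas.read_hdf, then exercise a second store backed by a temporary file.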
hdf_file = None
unique_filename_modin = get_unique_filename(extension="hdf")
unique_filename_pandas = get_unique_filename(extension="hdf")
try:
modin_store = pd.HDFStore(unique_filename_modin)
pandas_store = pandas.HDFStore(unique_filename_pandas)
modin_df, pandas_df = create_test_dfs(TEST_DATA)
modin_store["foo"] = modin_df
pandas_store["foo"] = pandas_df
modin_df = modin_store.get("foo")
pandas_df = pandas_store.get("foo")
df_equals(modin_df, pandas_df)
modin_store.close()
pandas_store.close()
modin_df = pandas.read_hdf(unique_filename_modin, key="foo", mode="r")
pandas_df = pandas.read_hdf(unique_filename_pandas, key="foo", mode="r")
df_equals(modin_df, pandas_df)
assert isinstance(modin_store, pd.HDFStore)
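# Create a standalone HDF5 file and close the low-level OS handle so HDFStore can open it itself.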
handle, hdf_file = tempfile.mkstemp(suffix=".hdf5", prefix="test_read")
os.close(handle)
with pd.HDFStore(hdf_file, mode="w") as store:
store.append("data/df1", pd.DataFrame(np.random.randn(5, 5)))
store.append("data/df2", pd.DataFrame(np.random.randn(4, 4)))
modin_df = pd.read_hdf(hdf_file, key="data/df1", mode="r")
pandas_df = pandas.read_hdf(hdf_file, key="data/df1", mode="r")
df_equals(modin_df, pandas_df)
finally:
if hdf_file:
os.unlink(hdf_file)
teardown_test_files([unique_filename_modin, unique_filename_pandas])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_HDFStore_in_read_hdf(self):
filename = get_unique_filename(extension="hdf")
dfin = pd.DataFrame(np.random.rand(8, 8))
try:
dfin.to_hdf(filename, "/key")
with pd.HDFStore(filename) as h:
modin_df = pd.read_hdf(h, "/key")
with pandas.HDFStore(filename) as h:
pandas_df = pandas.read_hdf(h, "/key")