text_prompt (stringlengths 100–17.7k ⌀) | code_prompt (stringlengths 7–9.86k ⌀)
---|---|
<SYSTEM_TASK:>
Make a pandas data frame of the mean and std devs of each element of a
<END_TASK>
<USER_TASK:>
Description:
def summary_df_from_list(results_list, names, **kwargs):
"""Make a panda data frame of the mean and std devs of each element of a
list of 1d arrays, including the uncertainties on the values.
This just converts the array to a DataFrame and calls summary_df on it.
Parameters
----------
results_list: list of 1d numpy arrays
Must have same length as names.
names: list of strs
Names for the output df's columns.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details.
""" |
for arr in results_list:
assert arr.shape == (len(names),)
df = pd.DataFrame(np.stack(results_list, axis=0))
df.columns = names
return summary_df(df, **kwargs) |
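A minimal usage sketch (illustrative values only; assumes summary_df is available in the same module and that numpy is imported):

import numpy as np

results_list = [np.array([1.0, 2.0]), np.array([1.1, 1.9]), np.array([0.9, 2.1])]
df = summary_df_from_list(results_list, ['logz', 'param_mean'])
# df is a MultiIndex DataFrame with one column per name; its rows give the
# mean and std of each column together with their numerical uncertainties.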
<SYSTEM_TASK:>
Apply summary_df to a multiindex while preserving some levels.
<END_TASK>
<USER_TASK:>
Description:
def summary_df_from_multi(multi_in, inds_to_keep=None, **kwargs):
"""Apply summary_df to a multiindex while preserving some levels.
Parameters
----------
multi_in: multiindex pandas DataFrame
inds_to_keep: None or list of strs, optional
Index levels to preserve.
kwargs: dict, optional
Keyword arguments to pass to summary_df.
Returns
-------
df: MultiIndex DataFrame
See summary_df docstring for more details.
""" |
# Need to pop include true values and add separately at the end as
# otherwise we get multiple true values added
include_true_values = kwargs.pop('include_true_values', False)
true_values = kwargs.get('true_values', None)
if inds_to_keep is None:
inds_to_keep = list(multi_in.index.names)[:-1]
if 'calculation type' not in inds_to_keep:
df = multi_in.groupby(inds_to_keep).apply(
summary_df, include_true_values=False, **kwargs)
else:
# If there is already a level called 'calculation type' in multi,
# summary_df will try making a second 'calculation type' index and (as
# of pandas v0.23.0) throw an error. Avoid this by renaming.
inds_to_keep = [lev if lev != 'calculation type' else
'calculation type temp' for lev in inds_to_keep]
multi_temp = copy.deepcopy(multi_in)
multi_temp.index.set_names(
[lev if lev != 'calculation type' else 'calculation type temp' for
lev in list(multi_temp.index.names)], inplace=True)
df = multi_temp.groupby(inds_to_keep).apply(
summary_df, include_true_values=False, **kwargs)
# add the 'calculation type' values ('mean' and 'std') produced by
# summary_df to the input calculation type names (now in level
# 'calculation type temp')
ind = (df.index.get_level_values('calculation type temp') + ' ' +
df.index.get_level_values('calculation type'))
order = list(df.index.names)
order.remove('calculation type temp')
df.index = df.index.droplevel(
['calculation type', 'calculation type temp'])
df['calculation type'] = list(ind)
df.set_index('calculation type', append=True, inplace=True)
df = df.reorder_levels(order)
if include_true_values:
assert true_values is not None
tv_ind = ['true values' if name == 'calculation type' else '' for
name in df.index.names[:-1]] + ['value']
df.loc[tuple(tv_ind), :] = true_values
return df |
<SYSTEM_TASK:>
r"""Calculated data frame showing
<END_TASK>
<USER_TASK:>
Description:
def efficiency_gain_df(method_names, method_values, est_names, **kwargs):
r"""Calculated data frame showing
.. math::
\mathrm{efficiency\,gain}
=
\frac{\mathrm{Var[base\,method]}}{\mathrm{Var[new\,method]}}
See the dynamic nested sampling paper (Higson et al. 2019) for more
details.
The standard method on which to base the gain is assumed to be the first
method input.
The output DataFrame will contain rows:
mean [dynamic goal]: mean calculation result for standard nested
sampling and dynamic nested sampling with each input dynamic
goal.
std [dynamic goal]: standard deviation of results for standard
nested sampling and dynamic nested sampling with each input
dynamic goal.
gain [dynamic goal]: the efficiency gain (computational speedup)
from dynamic nested sampling compared to standard nested
sampling. This equals (variance of standard results) /
(variance of dynamic results); see the dynamic nested
sampling paper for more details.
Parameters
----------
method_names: list of strs
method_values: list
Each element is a list of 1d arrays of results for the method. Each
array must have shape (len(est_names),).
est_names: list of strs
Provide column titles for output df.
true_values: iterable of same length as estimators list
True values of the estimators for the given likelihood and prior.
Returns
-------
results: pandas data frame
Results data frame.
""" |
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
adjust_nsamp = kwargs.pop('adjust_nsamp', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
if adjust_nsamp is not None:
assert adjust_nsamp.shape == (len(method_names),)
assert len(method_names) == len(method_values)
df_dict = {}
for i, method_name in enumerate(method_names):
# Set include_true_values=False as we don't want them repeated for
# every method
df = summary_df_from_list(
method_values[i], est_names, true_values=true_values,
include_true_values=False, include_rmse=include_rmse)
if i != 0:
stats = ['std']
if include_rmse:
stats.append('rmse')
if adjust_nsamp is not None:
# Efficiency gain measures performance per number of
# samples (proportional to computational work). If the
# number of samples is not the same we can adjust this.
adjust = (adjust_nsamp[0] / adjust_nsamp[i])
else:
adjust = 1
for stat in stats:
# Calculate efficiency gain vs standard nested sampling
gain, gain_unc = get_eff_gain(
df_dict[method_names[0]].loc[(stat, 'value')],
df_dict[method_names[0]].loc[(stat, 'uncertainty')],
df.loc[(stat, 'value')],
df.loc[(stat, 'uncertainty')], adjust=adjust)
key = stat + ' efficiency gain'
df.loc[(key, 'value'), :] = gain
df.loc[(key, 'uncertainty'), :] = gain_unc
df_dict[method_name] = df
results = pd.concat(df_dict)
results.index.rename('dynamic settings', level=0, inplace=True)
new_ind = []
new_ind.append(pd.CategoricalIndex(
results.index.get_level_values('calculation type'), ordered=True,
categories=['true values', 'mean', 'std', 'rmse',
'std efficiency gain', 'rmse efficiency gain']))
new_ind.append(pd.CategoricalIndex(
results.index.get_level_values('dynamic settings'),
ordered=True, categories=[''] + method_names))
new_ind.append(results.index.get_level_values('result type'))
results.set_index(new_ind, inplace=True)
if include_true_values:
with warnings.catch_warnings():
# Performance not an issue here so suppress annoying warning
warnings.filterwarnings('ignore', message=(
'indexing past lexsort depth may impact performance.'))
results.loc[('true values', '', 'value'), :] = true_values
results.sort_index(inplace=True)
return results |
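A worked sketch of the gain formula above (numbers chosen purely for illustration):

# If the standard method's results have standard deviation 0.2 and the new
# method's results have standard deviation 0.1, with the same number of
# samples per run, then
# efficiency gain = Var[standard] / Var[new] = 0.2 ** 2 / 0.1 ** 2 = 4,
# i.e. the new method needs roughly 4 times fewer samples for the same error.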
<SYSTEM_TASK:>
r"""Calculate the root meet squared error and its numerical uncertainty.
<END_TASK>
<USER_TASK:>
Description:
def rmse_and_unc(values_array, true_values):
r"""Calculate the root meet squared error and its numerical uncertainty.
With a reasonably large number of values in values_list the uncertainty
on sq_errors should be approximately normal (from the central limit
theorem).
Uncertainties are calculated via error propagation: if :math:`\sigma`
is the error on :math:`X` then the error on :math:`\sqrt{X}`
is :math:`\frac{\sigma}{2 \sqrt{X}}`.
Parameters
----------
values_array: 2d numpy array
Array of results: each row corresponds to a different estimate of the
quantities considered.
true_values: 1d numpy array
Correct values for the quantities considered.
Returns
-------
rmse: 1d numpy array
Root-mean-squared-error for each quantity.
rmse_unc: 1d numpy array
Numerical uncertainties on each element of rmse.
""" |
assert true_values.shape == (values_array.shape[1],)
errors = values_array - true_values[np.newaxis, :]
sq_errors = errors ** 2
sq_errors_mean = np.mean(sq_errors, axis=0)
sq_errors_mean_unc = (np.std(sq_errors, axis=0, ddof=1) /
np.sqrt(sq_errors.shape[0]))
rmse = np.sqrt(sq_errors_mean)
rmse_unc = 0.5 * (1 / rmse) * sq_errors_mean_unc
return rmse, rmse_unc |
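A small usage sketch with illustrative inputs, showing the error propagation described in the docstring:

import numpy as np

values_array = np.array([[1.0, 2.1], [1.2, 1.9], [0.8, 2.0]])  # 3 estimates of 2 quantities
true_values = np.array([1.0, 2.0])
rmse, rmse_unc = rmse_and_unc(values_array, true_values)
# rmse[j] is the square root of the mean squared error in column j;
# rmse_unc[j] is the standard error on that mean propagated through the
# square root, i.e. sigma / (2 * sqrt(X)).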
<SYSTEM_TASK:>
r"""Gives error on the ratio of 2 floats or 2 1-dimensional arrays given
<END_TASK>
<USER_TASK:>
Description:
def array_ratio_std(values_n, sigmas_n, values_d, sigmas_d):
r"""Gives error on the ratio of 2 floats or 2 1-dimensional arrays given
their values and uncertainties. This assumes the covariance = 0, and that
the input uncertainties are small compared to the corresponding input
values. _n and _d denote the numerator and denominator respectively.
Parameters
----------
values_n: float or numpy array
Numerator values.
sigmas_n: float or numpy array
:math:`1\sigma` uncertainties on values_n.
values_d: float or numpy array
Denominator values.
sigmas_d: float or numpy array
:math:`1\sigma` uncertainties on values_d.
Returns
-------
std: float or numpy array
:math:`1\sigma` uncertainty on values_n / values_d.
""" |
std = np.sqrt((sigmas_n / values_n) ** 2 + (sigmas_d / values_d) ** 2)
std *= (values_n / values_d)
return std |
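An illustrative sketch with arbitrary numbers:

std = array_ratio_std(10.0, 0.1, 5.0, 0.2)
# std = (10.0 / 5.0) * sqrt((0.1 / 10.0) ** 2 + (0.2 / 5.0) ** 2) ≈ 0.082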
<SYSTEM_TASK:>
Converts information on samples in a nested sampling run dictionary into
<END_TASK>
<USER_TASK:>
Description:
def array_given_run(ns_run):
"""Converts information on samples in a nested sampling run dictionary into
a numpy array representation. This allows fast addition of more samples and
recalculation of nlive.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
samples: 2d numpy array
Array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
""" |
samples = np.zeros((ns_run['logl'].shape[0], 3 + ns_run['theta'].shape[1]))
samples[:, 0] = ns_run['logl']
samples[:, 1] = ns_run['thread_labels']
# Calculate 'change in nlive' after each step
samples[:-1, 2] = np.diff(ns_run['nlive_array'])
samples[-1, 2] = -1 # nlive drops to zero after final point
samples[:, 3:] = ns_run['theta']
return samples |
<SYSTEM_TASK:>
Get the individual threads from a nested sampling run.
<END_TASK>
<USER_TASK:>
Description:
def get_run_threads(ns_run):
"""
Get the individual threads from a nested sampling run.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
Returns
-------
threads: list of numpy array
Each thread (list element) is a samples array containing columns
[logl, thread label, change in nlive at sample, (thetas)]
with each row representing a single sample.
""" |
samples = array_given_run(ns_run)
unique_threads = np.unique(ns_run['thread_labels'])
assert ns_run['thread_min_max'].shape[0] == unique_threads.shape[0], (
'some threads have no points! {0} != {1}'.format(
unique_threads.shape[0], ns_run['thread_min_max'].shape[0]))
threads = []
for i, th_lab in enumerate(unique_threads):
thread_array = samples[np.where(samples[:, 1] == th_lab)]
# delete changes in nlive due to other threads in the run
thread_array[:, 2] = 0
thread_array[-1, 2] = -1
min_max = np.reshape(ns_run['thread_min_max'][i, :], (1, 2))
assert min_max[0, 1] == thread_array[-1, 0], (
'thread max logl should equal logl of its final point!')
threads.append(dict_given_run_array(thread_array, min_max))
return threads |
<SYSTEM_TASK:>
Combine a list of complete nested sampling run dictionaries into a single
<END_TASK>
<USER_TASK:>
Description:
def combine_ns_runs(run_list_in, **kwargs):
"""
Combine a list of complete nested sampling run dictionaries into a single
ns run.
Input runs must not contain repeated threads.
Parameters
----------
run_list_in: list of dicts
List of nested sampling runs in dict format (see data_processing module
docstring for more details).
kwargs: dict, optional
Options for check_ns_run.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
""" |
run_list = copy.deepcopy(run_list_in)
if len(run_list) == 1:
run = run_list[0]
else:
nthread_tot = 0
for i, _ in enumerate(run_list):
check_ns_run(run_list[i], **kwargs)
run_list[i]['thread_labels'] += nthread_tot
nthread_tot += run_list[i]['thread_min_max'].shape[0]
thread_min_max = np.vstack([run['thread_min_max'] for run in run_list])
# construct samples array from the threads, including an updated nlive
samples_temp = np.vstack([array_given_run(run) for run in run_list])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
# Make combined run
run = dict_given_run_array(samples_temp, thread_min_max)
# Combine only the additive properties stored in run['output']
run['output'] = {}
for key in ['nlike', 'ndead']:
try:
run['output'][key] = sum([temp['output'][key] for temp in
run_list_in])
except KeyError:
pass
check_ns_run(run, **kwargs)
return run |
<SYSTEM_TASK:>
Combine list of threads into a single ns run.
<END_TASK>
<USER_TASK:>
Description:
def combine_threads(threads, assert_birth_point=False):
"""
Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that unless the thread labels are all unique and in ascending order,
the output will fail check_ns_run. However, provided the thread labels are
not used, it will work for calculations based on nlive, logl and theta.
Parameters
----------
threads: list of dicts
List of nested sampling run dicts, each representing a single thread.
assert_birth_point: bool, optional
Whether or not to assert there is exactly one point present in the run
with the log-likelihood at which each point was born. This is not true
for bootstrap resamples of runs, where birth points may be repeated or
not present at all.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
""" |
thread_min_max = np.vstack([td['thread_min_max'] for td in threads])
assert len(threads) == thread_min_max.shape[0]
# construct samples array from the threads, including an updated nlive
samples_temp = np.vstack([array_given_run(thread) for thread in threads])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
# update the changes in live points column for threads which start part way
# through the run. These are only present in dynamic nested sampling.
logl_starts = thread_min_max[:, 0]
state = np.random.get_state() # save random state
np.random.seed(0) # seed to make sure any random assignment is reproducible
for logl_start in logl_starts[logl_starts != -np.inf]:
ind = np.where(samples_temp[:, 0] == logl_start)[0]
if assert_birth_point:
assert ind.shape == (1,), \
'No unique birth point! ' + str(ind.shape)
if ind.shape == (1,):
# If the point at which this thread started is present exactly
# once in this bootstrap replication:
samples_temp[ind[0], 2] += 1
elif ind.shape == (0,):
# If the point with the likelihood at which the thread started
# is not present in this particular bootstrap replication,
# approximate it with the point with the nearest likelihood.
ind_closest = np.argmin(np.abs(samples_temp[:, 0] - logl_start))
samples_temp[ind_closest, 2] += 1
else:
# If the point at which this thread started is present multiple
# times in this bootstrap replication, select one at random to
# increment nlive on. This avoids any systematic bias from e.g.
# always choosing the first point.
samples_temp[np.random.choice(ind), 2] += 1
np.random.set_state(state)
# make run
ns_run = dict_given_run_array(samples_temp, thread_min_max)
try:
check_ns_run_threads(ns_run)
except AssertionError:
# If the threads are not valid (e.g. for bootstrap resamples) then
# set them to None so they can't be accidentally used
ns_run['thread_labels'] = None
ns_run['thread_min_max'] = None
return ns_run |
<SYSTEM_TASK:>
Get the relative posterior weights of the samples, normalised so
<END_TASK>
<USER_TASK:>
Description:
def get_w_rel(ns_run, simulate=False):
"""Get the relative posterior weights of the samples, normalised so
the maximum sample weight is 1. This is calculated from get_logw with
protection against numerical overflows.
Parameters
----------
ns_run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
simulate: bool, optional
See the get_logw docstring for more details.
Returns
-------
w_rel: 1d numpy array
Relative posterior masses of points.
""" |
logw = get_logw(ns_run, simulate=simulate)
return np.exp(logw - logw.max()) |
<SYSTEM_TASK:>
r"""Returns a logx vector showing the expected or simulated logx positions
<END_TASK>
<USER_TASK:>
Description:
def get_logx(nlive, simulate=False):
r"""Returns a logx vector showing the expected or simulated logx positions
of points.
The shrinkage factor between two points
.. math:: t_i = X_{i-1} / X_{i}
is distributed as the largest of :math:`n_i` uniform random variables
between 1 and 0, where :math:`n_i` is the local number of live points.
We are interested in
.. math:: \log(t_i) = \log X_{i-1} - \log X_{i}
which has expected value :math:`-1/n_i`.
Parameters
----------
nlive: 1d numpy array
Ordered local number of live points present at each point's
iso-likelihood contour.
simulate: bool, optional
Should log prior volumes logx be simulated from their distribution (if
False their expected values are used).
Returns
-------
logx: 1d numpy array
log X values for points.
""" |
assert nlive.min() > 0, (
'nlive contains zeros or negative values! nlive = ' + str(nlive))
if simulate:
logx_steps = np.log(np.random.random(nlive.shape)) / nlive
else:
logx_steps = -1 * (nlive.astype(float) ** -1)
return np.cumsum(logx_steps) |
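A short sketch contrasting the expected and simulated cases (the nlive values are illustrative):

import numpy as np

nlive = np.full(500, 100)                        # constant 100 live points
logx_expected = get_logx(nlive)                  # logx_expected[i] == -(i + 1) / 100
logx_simulated = get_logx(nlive, simulate=True)  # one random realisation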
<SYSTEM_TASK:>
Check nested sampling run member keys and values.
<END_TASK>
<USER_TASK:>
Description:
def check_ns_run_members(run):
"""Check nested sampling run member keys and values.
Parameters
----------
run: dict
nested sampling run to check.
Raises
------
AssertionError
if run does not have expected properties.
""" |
run_keys = list(run.keys())
# Mandatory keys
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert key in run_keys
run_keys.remove(key)
# Optional keys
for key in ['output']:
try:
run_keys.remove(key)
except ValueError:
pass
# Check for unexpected keys
assert not run_keys, 'Unexpected keys in ns_run: ' + str(run_keys)
# Check type of mandatory members
for key in ['logl', 'nlive_array', 'theta', 'thread_labels',
'thread_min_max']:
assert isinstance(run[key], np.ndarray), (
key + ' is type ' + type(run[key]).__name__)
# check shapes of keys
assert run['logl'].ndim == 1
assert run['logl'].shape == run['nlive_array'].shape
assert run['logl'].shape == run['thread_labels'].shape
assert run['theta'].ndim == 2
assert run['logl'].shape[0] == run['theta'].shape[0] |
<SYSTEM_TASK:>
Check run logls are unique and in the correct order.
<END_TASK>
<USER_TASK:>
Description:
def check_ns_run_logls(run, dup_assert=False, dup_warn=False):
"""Check run logls are unique and in the correct order.
Parameters
----------
run: dict
nested sampling run to check.
dup_assert: bool, optional
Whether to raise an AssertionError if there are duplicate logl values.
dup_warn: bool, optional
Whether to give a UserWarning if there are duplicate logl values (only
used if dup_assert is False).
Raises
------
AssertionError
if run does not have expected properties.
""" |
assert np.array_equal(run['logl'], run['logl'][np.argsort(run['logl'])])
if dup_assert or dup_warn:
unique_logls, counts = np.unique(run['logl'], return_counts=True)
repeat_logls = run['logl'].shape[0] - unique_logls.shape[0]
msg = ('{} duplicate logl values (out of a total of {}). This may be '
'caused by limited numerical precision in the output files.'
'\nrepeated logls = {}\ncounts = {}\npositions in list of {}'
' unique logls = {}').format(
repeat_logls, run['logl'].shape[0],
unique_logls[counts != 1], counts[counts != 1],
unique_logls.shape[0], np.where(counts != 1)[0])
if dup_assert:
assert repeat_logls == 0, msg
elif dup_warn:
if repeat_logls != 0:
warnings.warn(msg, UserWarning) |
<SYSTEM_TASK:>
Check thread labels and thread_min_max have expected properties.
<END_TASK>
<USER_TASK:>
Description:
def check_ns_run_threads(run):
"""Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties.
""" |
assert run['thread_labels'].dtype == int
uniq_th = np.unique(run['thread_labels'])
assert np.array_equal(
np.asarray(range(run['thread_min_max'].shape[0])), uniq_th), \
str(uniq_th)
# Check thread_min_max
assert np.any(run['thread_min_max'][:, 0] == -np.inf), (
'Run should have at least one thread which starts by sampling the ' +
'whole prior')
for th_lab in uniq_th:
inds = np.where(run['thread_labels'] == th_lab)[0]
th_info = 'thread label={}, first_logl={}, thread_min_max={}'.format(
th_lab, run['logl'][inds[0]], run['thread_min_max'][th_lab, :])
assert run['thread_min_max'][th_lab, 0] <= run['logl'][inds[0]], (
'First point in thread has logl less than thread min logl! ' +
th_info + ', difference={}'.format(
run['logl'][inds[0]] - run['thread_min_max'][th_lab, 0]))
assert run['thread_min_max'][th_lab, 1] == run['logl'][inds[-1]], (
'Last point in thread logl != thread end logl! ' + th_info) |
<SYSTEM_TASK:>
r"""Number of samples in run.
<END_TASK>
<USER_TASK:>
Description:
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
""" |
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0] |
<SYSTEM_TASK:>
Produce a latex formatted name for each function for use in labelling
<END_TASK>
<USER_TASK:>
Description:
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
""" |
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise |
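A usage sketch: only the function's __name__ is inspected, so a hypothetical stand-in with the right name is enough to illustrate the output:

import functools

def param_mean(ns_run, **kwargs):  # hypothetical stand-in; only __name__ is used here
    pass

print(get_latex_name(param_mean, param_ind=0))
# $\overline{\theta_{\hat{1}}}$
print(get_latex_name(functools.partial(param_mean), param_ind=1))
# $\overline{\theta_{\hat{2}}}$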
<SYSTEM_TASK:>
Get quantile estimate for input probability given weighted samples using
<END_TASK>
<USER_TASK:>
Description:
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
""" |
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds]) |
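A minimal sketch computing a weighted median (values chosen for illustration):

import numpy as np

values = np.array([1.0, 2.0, 3.0])
weights = np.array([0.2, 0.5, 0.3])
median = weighted_quantile(0.5, values, weights)
# The weighted CDF is interpolated linearly, giving 2.125 for these inputs.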
<SYSTEM_TASK:>
Gets a data frame with calculation values and error diagnostics for each
<END_TASK>
<USER_TASK:>
Description:
def run_list_error_values(run_list, estimator_list, estimator_names,
n_simulate=100, **kwargs):
"""Gets a data frame with calculation values and error diagnostics for each
run in the input run list.
NB when parallelised the results will not be produced in order (so the
results for a given run number will not necessarily correspond to that
run's position in run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int, optional
Number of bootstrap replications to use on each run.
thread_pvalue: bool, optional
Whether or not to compute the KS test diagnostic for correlations between
threads within a run.
bs_stat_dist: bool, optional
Whether or not to compute the statistical distance diagnostic between
bootstrap error distributions.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs (or pairs of runs for pairwise comparisons).
Columns have titles given by estimator_names and show results for the
different functions in estimators_list.
""" |
thread_pvalue = kwargs.pop('thread_pvalue', False)
bs_stat_dist = kwargs.pop('bs_stat_dist', False)
parallel = kwargs.pop('parallel', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names) = {1}'
.format(len(estimator_list), len(estimator_names)))
# Calculation results
# -------------------
df = estimator_values_df(run_list, estimator_list, parallel=parallel,
estimator_names=estimator_names)
df.index = df.index.map(str)
df['calculation type'] = 'values'
df.set_index('calculation type', drop=True, append=True, inplace=True)
df = df.reorder_levels(['calculation type', 'run'])
# Bootstrap stds
# --------------
# Create bs_vals_df then convert to stds so bs_vals_df does not need to be
# recomputed if bs_stat_dist is True
bs_vals_df = bs_values_df(run_list, estimator_list, estimator_names,
n_simulate, parallel=parallel)
bs_std_df = bs_vals_df.applymap(lambda x: np.std(x, ddof=1))
bs_std_df.index.name = 'run'
bs_std_df['calculation type'] = 'bootstrap std'
bs_std_df.set_index('calculation type', drop=True, append=True,
inplace=True)
bs_std_df = bs_std_df.reorder_levels(['calculation type', 'run'])
df = pd.concat([df, bs_std_df])
# Pairwise KS p-values on threads
# -------------------------------
if thread_pvalue:
t_vals_df = thread_values_df(
run_list, estimator_list, estimator_names, parallel=parallel)
t_d_df = pairwise_dists_on_cols(t_vals_df, earth_mover_dist=False,
energy_dist=False)
# Keep only the p value not the distance measures
t_d_df = t_d_df.xs('ks pvalue', level='calculation type',
drop_level=False)
# Prepend 'thread ' to the calculation type
t_d_df.index.set_levels(['thread ks pvalue'], level='calculation type',
inplace=True)
df = pd.concat([df, t_d_df])
# Pairwise distances on BS distributions
# --------------------------------------
if bs_stat_dist:
b_d_df = pairwise_dists_on_cols(bs_vals_df)
# Select only statistical distances - not KS pvalue as this is not
# useful for the bootstrap resample distributions (see Higson et al.
# 2019 for more details).
dists = ['ks distance', 'earth mover distance', 'energy distance']
b_d_df = b_d_df.loc[pd.IndexSlice[dists, :], :]
# Prepend 'bootstrap ' to the calculation type
new_ind = ['bootstrap ' +
b_d_df.index.get_level_values('calculation type'),
b_d_df.index.get_level_values('run')]
b_d_df.set_index(new_ind, inplace=True)
df = pd.concat([df, b_d_df])
return df |
<SYSTEM_TASK:>
Get a dataframe of estimator values.
<END_TASK>
<USER_TASK:>
Description:
def estimator_values_df(run_list, estimator_list, **kwargs):
"""Get a dataframe of estimator values.
NB when parallelised the results will not be produced in order (so the
results for a given run number will not necessarily correspond to that
run's position in run_list).
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs, optional
Name of each func in estimator_list.
parallel: bool, optional
Whether or not to parallelise - see parallel_utils.parallel_apply.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
df: pandas DataFrame
Results table showing calculation values and diagnostics. Rows
show different runs.
Columns have titles given by estimator_names and show results for the
different functions in estimators_list.
""" |
estimator_names = kwargs.pop(
'estimator_names',
['est_' + str(i) for i in range(len(estimator_list))])
parallel = kwargs.pop('parallel', True)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
values_list = pu.parallel_apply(
nestcheck.ns_run_utils.run_estimators, run_list,
func_args=(estimator_list,), parallel=parallel)
df = pd.DataFrame(np.stack(values_list, axis=0))
df.columns = estimator_names
df.index.name = 'run'
return df |
<SYSTEM_TASK:>
Get summary statistics about calculation errors, including estimated
<END_TASK>
<USER_TASK:>
Description:
def error_values_summary(error_values, **summary_df_kwargs):
"""Get summary statistics about calculation errors, including estimated
implementation errors.
Parameters
----------
error_values: pandas DataFrame
In the format output by run_list_error_values (see its docstring for more
details).
summary_df_kwargs: dict, optional
See pandas_functions.summary_df docstring for more details.
Returns
-------
df: pandas DataFrame
Table showing means and standard deviations of results and diagnostics
for the different runs. Also contains estimated numerical uncertainties
on results.
""" |
df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)
# get implementation stds
imp_std, imp_std_unc, imp_frac, imp_frac_unc = \
nestcheck.error_analysis.implementation_std(
df.loc[('values std', 'value')],
df.loc[('values std', 'uncertainty')],
df.loc[('bootstrap std mean', 'value')],
df.loc[('bootstrap std mean', 'uncertainty')])
df.loc[('implementation std', 'value'), df.columns] = imp_std
df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc
df.loc[('implementation std frac', 'value'), :] = imp_frac
df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc
# Get implementation RMSEs (calculated using the values RMSE instead of
# values std)
if 'values rmse' in set(df.index.get_level_values('calculation type')):
imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = \
nestcheck.error_analysis.implementation_std(
df.loc[('values rmse', 'value')],
df.loc[('values rmse', 'uncertainty')],
df.loc[('bootstrap std mean', 'value')],
df.loc[('bootstrap std mean', 'uncertainty')])
df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse
df.loc[('implementation rmse', 'uncertainty'), df.columns] = \
imp_rmse_unc
df.loc[('implementation rmse frac', 'value'), :] = imp_frac
df.loc[('implementation rmse frac', 'uncertainty'), :] = imp_frac_unc
# Return only the calculation types we are interested in, in order
calcs_to_keep = ['true values', 'values mean', 'values std',
'values rmse', 'bootstrap std mean',
'implementation std', 'implementation std frac',
'implementation rmse', 'implementation rmse frac',
'thread ks pvalue mean', 'bootstrap ks distance mean',
'bootstrap energy distance mean',
'bootstrap earth mover distance mean']
df = pd.concat([df.xs(calc, level='calculation type', drop_level=False) for
calc in calcs_to_keep if calc in
df.index.get_level_values('calculation type')])
return df |
<SYSTEM_TASK:>
Wrapper which runs run_list_error_values then applies error_values
<END_TASK>
<USER_TASK:>
Description:
def run_list_error_summary(run_list, estimator_list, estimator_names,
n_simulate, **kwargs):
"""Wrapper which runs run_list_error_values then applies error_values
summary to the resulting dataframe. See the docstrings for those two
functions for more details and for descriptions of parameters and output.
""" |
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
error_values = run_list_error_values(run_list, estimator_list,
estimator_names, n_simulate, **kwargs)
return error_values_summary(error_values, true_values=true_values,
include_true_values=include_true_values,
include_rmse=include_rmse) |
<SYSTEM_TASK:>
Computes a data frame of bootstrap resampled values.
<END_TASK>
<USER_TASK:>
Description:
def bs_values_df(run_list, estimator_list, estimator_names, n_simulate,
**kwargs):
"""Computes a data frame of bootstrap resampled values.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int
Number of bootstrap replications to use on each run.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
bs_values_df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d array of bootstrap resampled values for the run
and estimator.
""" |
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names) = {1}'
.format(len(estimator_list), len(estimator_names)))
bs_values_list = pu.parallel_apply(
nestcheck.error_analysis.run_bootstrap_values, run_list,
func_args=(estimator_list,), func_kwargs={'n_simulate': n_simulate},
tqdm_kwargs=tqdm_kwargs, **kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
df[name] = [arr[i, :] for arr in bs_values_list]
# Check there are the correct number of bootstrap replications in each cell
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
assert vals_shape == (n_simulate,), (
'Should be n_simulate=' + str(n_simulate) + ' values in ' +
'each cell. The cell contains array with shape ' +
str(vals_shape))
return df |
<SYSTEM_TASK:>
Calculates estimator values for the constituent threads of the input
<END_TASK>
<USER_TASK:>
Description:
def thread_values_df(run_list, estimator_list, estimator_names, **kwargs):
"""Calculates estimator values for the constituent threads of the input
runs.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d numpy array with length equal to the number
of threads in the run, containing the results from evaluating the
estimator on each thread.
""" |
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'thread values'})
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names) = {1}'
.format(len(estimator_list), len(estimator_names)))
# get thread results
thread_vals_arrays = pu.parallel_apply(
nestcheck.error_analysis.run_thread_values, run_list,
func_args=(estimator_list,), tqdm_kwargs=tqdm_kwargs, **kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
df[name] = [arr[i, :] for arr in thread_vals_arrays]
# Check there are the correct number of thread values in each cell
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
assert vals_shape == (run_list[0]['thread_min_max'].shape[0],), \
('Should be nlive=' + str(run_list[0]['thread_min_max'].shape[0]) +
' values in each cell. The cell contains array with shape ' +
str(vals_shape))
return df |
<SYSTEM_TASK:>
Computes pairwise statistical distance measures.
<END_TASK>
<USER_TASK:>
Description:
def pairwise_dists_on_cols(df_in, earth_mover_dist=True, energy_dist=True):
"""Computes pairwise statistical distance measures.
Parameters
----------
df_in: pandas data frame
Columns represent estimators and rows represent runs.
Each data frame element is an array of values which are used as samples
in the distance measures.
earth_mover_dist: bool, optional
Passed to error_analysis.pairwise_distances.
energy_dist: bool, optional
Passed to error_analysis.pairwise_distances.
Returns
-------
df: pandas data frame of pairwise distance values, with one column per estimator.
""" |
df = pd.DataFrame()
for col in df_in.columns:
df[col] = nestcheck.error_analysis.pairwise_distances(
df_in[col].values, earth_mover_dist=earth_mover_dist,
energy_dist=energy_dist)
return df |
<SYSTEM_TASK:>
Quote the column names
<END_TASK>
<USER_TASK:>
Description:
def _backtick_columns(cols):
"""
Quote the column names
""" |
def bt(s):
b = '' if s == '*' or not s else '`'
return [_ for _ in [b + (s or '') + b] if _]
formatted = []
for c in cols:
if c[0] == '#':
formatted.append(c[1:])
elif c.startswith('(') and c.endswith(')'):
# WHERE (column_a, column_b) IN ((1,10), (1,20))
formatted.append(c)
else:
# backtick the part before the first dot, then backtick the rest as a single name
formatted.append('.'.join(bt(c.split('.')[0]) + bt('.'.join(c.split('.')[1:]))))
return ', '.join(formatted) |
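Illustrative inputs and the quoting the logic above would produce (a sketch, not taken from the library's tests):

# _backtick_columns(['a', 't.b', '#count(*)'])
# -> '`a`, `t`.`b`, count(*)'
# A leading '#' suppresses quoting, dotted names are quoted around the first
# dot, and parenthesised tuples such as '(col_a, col_b)' pass through as-is.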
<SYSTEM_TASK:>
Allow select.group and select.order accepting string and list
<END_TASK>
<USER_TASK:>
Description:
def _by_columns(self, columns):
"""
Allow select.group and select.order accepting string and list
""" |
return columns if self.isstr(columns) else self._backtick_columns(columns) |
<SYSTEM_TASK:>
Returns a tuple containing the whitespace to the left and
<END_TASK>
<USER_TASK:>
Description:
def get_whitespace(txt):
"""
Returns a tuple containing the whitespace to the left and
right of a string as its two elements
""" |
# if the entire parameter is whitespace
rall = re.search(r'^([\s])+$', txt)
if rall:
tmp = txt.split('\n', 1)
if len(tmp) == 2:
return (tmp[0], '\n' + tmp[1]) # left, right
else:
return ('', tmp[0]) # left, right
left = ''
# find whitespace to the left of the parameter
rlm = re.search(r'^([\s])+', txt)
if rlm:
left = rlm.group(0)
right = ''
# find whitespace to the right of the parameter
rrm = re.search(r'([\s])+$', txt)
if rrm:
right = rrm.group(0)
return (left, right) |
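A few illustrative inputs and the (left, right) pairs they return:

# get_whitespace('  foo ')  ->  ('  ', ' ')
# get_whitespace('bar')     ->  ('', '')
# get_whitespace(' \n ')    ->  (' ', '\n ')  # all-whitespace input splits at the first newline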
<SYSTEM_TASK:>
Try to find a whitespace pattern in the existing parameters
<END_TASK>
<USER_TASK:>
Description:
def find_whitespace_pattern(self):
"""
Try to find a whitespace pattern in the existing parameters
to be applied to a newly added parameter
""" |
name_ws = []
value_ws = []
for entry in self._entries:
name_ws.append(get_whitespace(entry.name))
if entry.value != '':
value_ws.append(get_whitespace(entry._value)) # _value is unstripped
if len(value_ws) >= 1:
value_ws = most_common(value_ws)
else:
value_ws = ('', ' ')
if len(name_ws) >= 1:
name_ws = most_common(name_ws)
else:
name_ws = (' ', '')
return name_ws, value_ws |
<SYSTEM_TASK:>
Generate the path on disk for a specified project and date.
<END_TASK>
<USER_TASK:>
Description:
def _path_for_file(self, project_name, date):
"""
Generate the path on disk for a specified project and date.
:param project_name: the PyPI project name for the data
:type project: str
:param date: the date for the data
:type date: datetime.datetime
:return: path for where to store this data on disk
:rtype: str
""" |
return os.path.join(
self.cache_path,
'%s_%s.json' % (project_name, date.strftime('%Y%m%d'))
) |
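A standalone sketch of the filename convention this produces (the cache directory and project name here are hypothetical):

import os
from datetime import datetime

cache_path = '/tmp/pypi-cache'
fname = '%s_%s.json' % ('requests', datetime(2024, 1, 31).strftime('%Y%m%d'))
print(os.path.join(cache_path, fname))  # /tmp/pypi-cache/requests_20240131.json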
<SYSTEM_TASK:>
Get the cache data for a specified project for the specified date.
<END_TASK>
<USER_TASK:>
Description:
def get(self, project, date):
"""
Get the cache data for a specified project for the specified date.
Returns None if the data cannot be found in the cache.
:param project: PyPi project name to get data for
:type project: str
:param date: date to get data for
:type date: datetime.datetime
:return: dict of per-date data for project
:rtype: :py:obj:`dict` or ``None``
""" |
fpath = self._path_for_file(project, date)
logger.debug('Cache GET project=%s date=%s - path=%s',
project, date.strftime('%Y-%m-%d'), fpath)
try:
with open(fpath, 'r') as fh:
data = json.loads(fh.read())
except Exception:
logger.debug('Error getting from cache for project=%s date=%s',
project, date.strftime('%Y-%m-%d'))
return None
data['cache_metadata']['date'] = datetime.strptime(
data['cache_metadata']['date'],
'%Y%m%d'
)
data['cache_metadata']['updated'] = datetime.fromtimestamp(
data['cache_metadata']['updated']
)
return data |
<SYSTEM_TASK:>
Set the cache data for a specified project for the specified date.
<END_TASK>
<USER_TASK:>
Description:
def set(self, project, date, data, data_ts):
"""
Set the cache data for a specified project for the specified date.
:param project: project name to set data for
:type project: str
:param date: date to set data for
:type date: datetime.datetime
:param data: data to cache
:type data: dict
:param data_ts: maximum timestamp in the BigQuery data table
:type data_ts: int
""" |
data['cache_metadata'] = {
'project': project,
'date': date.strftime('%Y%m%d'),
'updated': time.time(),
'version': VERSION,
'data_ts': data_ts
}
fpath = self._path_for_file(project, date)
logger.debug('Cache SET project=%s date=%s - path=%s',
project, date.strftime('%Y-%m-%d'), fpath)
with open(fpath, 'w') as fh:
fh.write(json.dumps(data)) |
<SYSTEM_TASK:>
Return a list of the dates we have in cache for the specified project,
<END_TASK>
<USER_TASK:>
Description:
def get_dates_for_project(self, project):
"""
Return a list of the dates we have in cache for the specified project,
sorted in ascending date order.
:param project: project name
:type project: str
:return: list of datetime.datetime objects
:rtype: datetime.datetime
""" |
file_re = re.compile(r'^%s_([0-9]{8})\.json$' % project)
all_dates = []
for f in os.listdir(self.cache_path):
if not os.path.isfile(os.path.join(self.cache_path, f)):
continue
m = file_re.match(f)
if m is None:
continue
all_dates.append(datetime.strptime(m.group(1), '%Y%m%d'))
return sorted(all_dates) |
<SYSTEM_TASK:>
Set logger level and format.
<END_TASK>
<USER_TASK:>
Description:
def set_log_level_format(level, format):
"""
Set logger level and format.
:param level: logging level; see the :py:mod:`logging` constants.
:type level: int
:param format: logging formatter format string
:type format: str
""" |
formatter = logging.Formatter(fmt=format)
logger.handlers[0].setFormatter(formatter)
logger.setLevel(level) |
<SYSTEM_TASK:>
Given the username of a PyPI user, return a list of all of the user's
<END_TASK>
<USER_TASK:>
Description:
def _pypi_get_projects_for_user(username):
"""
Given the username of a PyPI user, return a list of all of the user's
projects from the XMLRPC interface.
See: https://wiki.python.org/moin/PyPIXmlRpc
:param username: PyPI username
:type username: str
:return: list of string project names
:rtype: ``list``
""" |
client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
pkgs = client.user_packages(username) # returns [role, package]
return [x[1] for x in pkgs] |
<SYSTEM_TASK:>
Generate the graph; return a 2-tuple of strings, script to place in the
<END_TASK>
<USER_TASK:>
Description:
def generate_graph(self):
"""
Generate the graph; return a 2-tuple of strings, script to place in the
head of the HTML document and div content for the graph itself.
:return: 2-tuple (script, div)
:rtype: tuple
""" |
logger.debug('Generating graph for %s', self._graph_id)
# tools to use
tools = [
PanTool(),
BoxZoomTool(),
WheelZoomTool(),
SaveTool(),
ResetTool(),
ResizeTool()
]
# generate the stacked area graph
try:
g = Area(
self._data, x='Date', y=self._y_series_names,
title=self._title, stack=True, xlabel='Date',
ylabel='Downloads', tools=tools,
# note the width and height will be set by JavaScript
plot_height=400, plot_width=800,
toolbar_location='above', legend=False
)
except Exception as ex:
logger.error("Error generating %s graph", self._graph_id)
logger.error("Data: %s", self._data)
logger.error("y=%s", self._y_series_names)
raise ex
lines = []
legend_parts = []
# add a line at the top of each Patch (stacked area) for hovertool
for renderer in g.select(GlyphRenderer):
if not isinstance(renderer.glyph, Patches):
continue
series_name = renderer.data_source.data['series'][0]
logger.debug('Adding line for Patches %s (series: %s)', renderer,
series_name)
line = self._line_for_patches(self._data, g, renderer, series_name)
if line is not None:
lines.append(line)
legend_parts.append((series_name, [line]))
# add the Hovertool, specifying only our line glyphs
g.add_tools(
HoverTool(
tooltips=[
(self._y_name, '@SeriesName'),
('Date', '@FmtDate'),
('Downloads', '@Downloads'),
],
renderers=lines,
line_policy='nearest'
)
)
# legend outside chart area
legend = Legend(legends=legend_parts, location=(0, 0))
g.add_layout(legend, 'right')
return components(g) |
<SYSTEM_TASK:>
Add a line along the top edge of a Patch in a stacked Area Chart; return
<END_TASK>
<USER_TASK:>
Description:
def _line_for_patches(self, data, chart, renderer, series_name):
"""
Add a line along the top edge of a Patch in a stacked Area Chart; return
the new Glyph for addition to HoverTool.
:param data: original data for the graph
:type data: dict
:param chart: Chart to add the line to
:type chart: bokeh.charts.Chart
:param renderer: GlyphRenderer containing one Patches glyph, to draw
the line for
:type renderer: bokeh.models.renderers.GlyphRenderer
:param series_name: the data series name this Patches represents
:type series_name: str
:return: GlyphRenderer for a Line at the top edge of this Patch
:rtype: bokeh.models.renderers.GlyphRenderer
""" |
# @TODO this method needs a major refactor
# get the original x and y values, and color
xvals = deepcopy(renderer.data_source.data['x_values'][0])
yvals = deepcopy(renderer.data_source.data['y_values'][0])
line_color = renderer.glyph.fill_color
# save original values for logging if needed
orig_xvals = [x for x in xvals]
orig_yvals = [y for y in yvals]
# get a list of the values
new_xvals = [x for x in xvals]
new_yvals = [y for y in yvals]
# so when a Patch is made, the first point is (0,0); trash it
xvals = new_xvals[1:]
yvals = new_yvals[1:]
# then, we can tell the last point in the "top" line because it will be
# followed by a point with the same x value and a y value of 0.
last_idx = None
for idx, val in enumerate(xvals):
if yvals[idx+1] == 0 and xvals[idx+1] == xvals[idx]:
last_idx = idx
break
if last_idx is None:
logger.error('Unable to find top line of patch (x_values=%s '
'y_values=%s', orig_xvals, orig_yvals)
return None
# truncate our values to just what makes up the top line
xvals = xvals[:last_idx+1]
yvals = yvals[:last_idx+1]
# Currently (bokeh 0.12.1) HoverTool won't show the tooltip for the last
# point in our line. As a hack for this, add a point with the same Y
# value and an X slightly before it.
lastx = xvals[-1]
xvals[-1] = lastx - 1000 # 1000 nanoseconds
xvals.append(lastx)
yvals.append(yvals[-1])
# get the actual download counts from the original data
download_counts = [
data[series_name][y] for y in range(0, len(yvals) - 1)
]
download_counts.append(download_counts[-1])
# create a ColumnDataSource for the new overlay line
data2 = {
'x': xvals, # Date/x values are numpy.datetime64
'y': yvals,
# the following are hacks for data that we want in the HoverTool
# tooltip
'SeriesName': [series_name for _ in yvals],
# formatted date
'FmtDate': [self.datetime64_to_formatted_date(x) for x in xvals],
# to show the exact value, not where the pointer is
'Downloads': download_counts
}
# set the formatted date for our hacked second-to-last point to the
# same value as the last point
data2['FmtDate'][-2] = data2['FmtDate'][-1]
# create the ColumnDataSource, then the line for it, then the Glyph
line_ds = ColumnDataSource(data2)
line = Line(x='x', y='y', line_color=line_color)
lineglyph = chart.add_glyph(line_ds, line)
return lineglyph |
<SYSTEM_TASK:>
Return True if the specified cache record has no data, False otherwise.
<END_TASK>
<USER_TASK:>
Description:
def _is_empty_cache_record(self, rec):
"""
Return True if the specified cache record has no data, False otherwise.
:param rec: cache record returned by :py:meth:`~._cache_get`
:type rec: dict
:return: True if record is empty, False otherwise
:rtype: bool
""" |
# these are taken from DataQuery.query_one_table()
for k in [
'by_version',
'by_file_type',
'by_installer',
'by_implementation',
'by_system',
'by_distro',
'by_country'
]:
if k in rec and len(rec[k]) > 0:
return False
return True |
<SYSTEM_TASK:>
Return cache data for the specified day; cache locally in this class.
<END_TASK>
<USER_TASK:>
Description:
def _cache_get(self, date):
"""
Return cache data for the specified day; cache locally in this class.
:param date: date to get data for
:type date: datetime.datetime
:return: cache data for date
:rtype: dict
""" |
if date in self.cache_data:
logger.debug('Using class-cached data for date %s',
date.strftime('%Y-%m-%d'))
return self.cache_data[date]
logger.debug('Getting data from cache for date %s',
date.strftime('%Y-%m-%d'))
data = self.cache.get(self.project_name, date)
self.cache_data[date] = data
return data |
<SYSTEM_TASK:>
Return download data by version.
<END_TASK>
<USER_TASK:>
Description:
def per_version_data(self):
"""
Return download data by version.
:return: dict of cache data; keys are datetime objects, values are
dict of version (str) to count (int)
:rtype: dict
""" |
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
if len(data['by_version']) == 0:
data['by_version'] = {'other': 0}
ret[cache_date] = data['by_version']
return ret |
<SYSTEM_TASK:>
Return download data by file type.
<END_TASK>
<USER_TASK:>
Description:
def per_file_type_data(self):
"""
Return download data by file type.
:return: dict of cache data; keys are datetime objects, values are
dict of file type (str) to count (int)
:rtype: dict
""" |
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
if len(data['by_file_type']) == 0:
data['by_file_type'] = {'other': 0}
ret[cache_date] = data['by_file_type']
return ret |
<SYSTEM_TASK:>
Return download data by installer name and version.
<END_TASK>
<USER_TASK:>
Description:
def per_installer_data(self):
"""
Return download data by installer name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of installer name/version (str) to count (int).
:rtype: dict
""" |
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for inst_name, inst_data in data['by_installer'].items():
for inst_ver, count in inst_data.items():
k = self._compound_column_value(
inst_name,
self._shorten_version(inst_ver)
)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret |
<SYSTEM_TASK:>
Return download data by Python implementation name and version.
<END_TASK>
<USER_TASK:>
Description:
def per_implementation_data(self):
"""
Return download data by Python implementation name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of implementation name/version (str) to count (int).
:rtype: dict
""" |
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for impl_name, impl_data in data['by_implementation'].items():
for impl_ver, count in impl_data.items():
k = self._compound_column_value(
impl_name,
self._shorten_version(impl_ver)
)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret |
<SYSTEM_TASK:>
Return download data by system.
<END_TASK>
<USER_TASK:>
Description:
def per_system_data(self):
"""
Return download data by system.
:return: dict of cache data; keys are datetime objects, values are
dict of system (str) to count (int)
:rtype: dict
""" |
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {
self._column_value(x): data['by_system'][x]
for x in data['by_system']
}
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret |
<SYSTEM_TASK:>
Return download data by country.
<END_TASK>
<USER_TASK:>
Description:
def per_country_data(self):
"""
Return download data by country.
:return: dict of cache data; keys are datetime objects, values are
dict of country (str) to count (int)
:rtype: dict
""" |
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for cc, count in data['by_country'].items():
k = '%s (%s)' % (self._alpha2_to_country(cc), cc)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret |
<SYSTEM_TASK:>
Return download data by distro name and version.
<END_TASK>
<USER_TASK:>
Description:
def per_distro_data(self):
"""
Return download data by distro name and version.
:return: dict of cache data; keys are datetime objects, values are
dict of distro name/version (str) to count (int).
:rtype: dict
""" |
ret = {}
for cache_date in self.cache_dates:
data = self._cache_get(cache_date)
ret[cache_date] = {}
for distro_name, distro_data in data['by_distro'].items():
if distro_name.lower() == 'red hat enterprise linux server':
distro_name = 'RHEL'
for distro_ver, count in distro_data.items():
ver = self._shorten_version(distro_ver, num_components=1)
if distro_name.lower() == 'os x':
ver = self._shorten_version(distro_ver,
num_components=2)
k = self._compound_column_value(distro_name, ver)
ret[cache_date][k] = count
if len(ret[cache_date]) == 0:
ret[cache_date]['unknown'] = 0
return ret |
<SYSTEM_TASK:>
Return the number of downloads per day, averaged over the past 7 days
<END_TASK>
<USER_TASK:>
Description:
def downloads_per_day(self):
"""
Return the number of downloads per day, averaged over the past 7 days
of data.
:return: average number of downloads per day
:rtype: int
""" |
count, num_days = self._downloads_for_num_days(7)
res = ceil(count / num_days)
logger.debug("Downloads per day = (%d / %d) = %d", count, num_days, res)
return res |
<SYSTEM_TASK:>
Return the number of downloads in the last 7 days.
<END_TASK>
<USER_TASK:>
Description:
def downloads_per_week(self):
"""
Return the number of downloads in the last 7 days.
:return: number of downloads in the last 7 days; if we have less than
7 days of data, returns None.
:rtype: int
""" |
if len(self.cache_dates) < 7:
logger.error("Only have %d days of data; cannot calculate "
"downloads per week", len(self.cache_dates))
return None
count, _ = self._downloads_for_num_days(7)
logger.debug("Downloads per week = %d", count)
return count |
<SYSTEM_TASK:>
Connect to the BigQuery service.
<END_TASK>
<USER_TASK:>
Description:
def _get_bigquery_service(self):
"""
Connect to the BigQuery service.
Calling ``GoogleCredentials.get_application_default`` requires that
you either be running in the Google Cloud, or have the
``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the path
to a credentials JSON file.
:return: authenticated BigQuery service connection object
:rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
google-api-python-client/docs/epy/googleapiclient.discovery.\
Resource-class.html>`_
""" |
logger.debug('Getting Google Credentials')
credentials = GoogleCredentials.get_application_default()
logger.debug('Building BigQuery service instance')
bigquery_service = build('bigquery', 'v2', credentials=credentials)
return bigquery_service |
<SYSTEM_TASK:>
Run one query against BigQuery and return the result.
<END_TASK>
<USER_TASK:>
Description:
def _run_query(self, query):
"""
Run one query against BigQuery and return the result.
:param query: the query to run
:type query: str
:return: list of per-row response dicts (key => value)
:rtype: ``list``
""" |
query_request = self.service.jobs()
logger.debug('Running query: %s', query)
start = datetime.now()
resp = query_request.query(
projectId=self.project_id, body={'query': query}
).execute()
duration = datetime.now() - start
logger.debug('Query response (in %s): %s', duration, resp)
if not resp['jobComplete']:
logger.error('Error: query reported job not complete!')
if int(resp['totalRows']) == 0:
return []
if int(resp['totalRows']) != len(resp['rows']):
logger.error('Error: query reported %s total rows, but only '
'returned %d', resp['totalRows'], len(resp['rows']))
data = []
fields = [f['name'] for f in resp['schema']['fields']]
for row in resp['rows']:
d = {}
for idx, val in enumerate(row['f']):
d[fields[idx]] = val['v']
data.append(d)
return data |
<SYSTEM_TASK:>
Return the timestamp for the newest record in the given table.
<END_TASK>
<USER_TASK:>
Description:
def _get_newest_ts_in_table(self, table_name):
"""
Return the timestamp for the newest record in the given table.
:param table_name: name of the table to query
:type table_name: str
:return: timestamp of newest row in table
:rtype: int
""" |
logger.debug(
'Querying for newest timestamp in table %s', table_name
)
q = "SELECT TIMESTAMP_TO_SEC(MAX(timestamp)) AS max_ts %s;" % (
self._from_for_table(table_name)
)
res = self._run_query(q)
ts = int(res[0]['max_ts'])
logger.debug('Newest timestamp in table %s: %s', table_name, ts)
return ts |
<SYSTEM_TASK:>
Query for download data broken down by installer, for one day.
<END_TASK>
<USER_TASK:>
Description:
def _query_by_installer(self, table_name):
"""
Query for download data broken down by installer, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by installer; keys are project
name, values are a dict of installer names to dicts of installer
version to download count.
:rtype: dict
""" |
logger.info('Querying for downloads by installer in table %s',
table_name)
q = "SELECT file.project, details.installer.name, " \
"details.installer.version, COUNT(*) as dl_count " \
"%s " \
"%s " \
"GROUP BY file.project, details.installer.name, " \
"details.installer.version;" % (
self._from_for_table(table_name),
self._where_for_projects
)
res = self._run_query(q)
result = self._dict_for_projects()
# iterate through results
for row in res:
# pointer to the per-project result dict
proj = result[row['file_project']]
            # grab the installer name and version from the row
iname = row['details_installer_name']
iver = row['details_installer_version']
if iname not in proj:
proj[iname] = {}
if iver not in proj[iname]:
proj[iname][iver] = 0
proj[iname][iver] += int(row['dl_count'])
return result |
<SYSTEM_TASK:>
Query for download data broken down by system, for one day.
<END_TASK>
<USER_TASK:>
Description:
def _query_by_system(self, table_name):
"""
Query for download data broken down by system, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by system; keys are project name,
values are a dict of system names to download count.
:rtype: dict
""" |
logger.info('Querying for downloads by system in table %s',
table_name)
q = "SELECT file.project, details.system.name, COUNT(*) as dl_count " \
"%s " \
"%s " \
"GROUP BY file.project, details.system.name;" % (
self._from_for_table(table_name),
self._where_for_projects
)
res = self._run_query(q)
result = self._dict_for_projects()
for row in res:
system = row['details_system_name']
result[row['file_project']][system] = int(
row['dl_count'])
return result |
<SYSTEM_TASK:>
Query for download data broken down by OS distribution, for one day.
<END_TASK>
<USER_TASK:>
Description:
def _query_by_distro(self, table_name):
"""
Query for download data broken down by OS distribution, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by distro; keys are project name,
values are a dict of distro names to dicts of distro version to
download count.
:rtype: dict
""" |
logger.info('Querying for downloads by distro in table %s', table_name)
q = "SELECT file.project, details.distro.name, " \
"details.distro.version, COUNT(*) as dl_count " \
"%s " \
"%s " \
"GROUP BY file.project, details.distro.name, " \
"details.distro.version;" % (
self._from_for_table(table_name),
self._where_for_projects
)
res = self._run_query(q)
result = self._dict_for_projects()
# iterate through results
for row in res:
# pointer to the per-project result dict
proj = result[row['file_project']]
            # grab the distro name and version from the row
dname = row['details_distro_name']
dver = row['details_distro_version']
if dname not in proj:
proj[dname] = {}
if dver not in proj[dname]:
proj[dname][dver] = 0
proj[dname][dver] += int(row['dl_count'])
return result |
<SYSTEM_TASK:>
Return True if we have cached data for all projects for the specified
<END_TASK>
<USER_TASK:>
Description:
def _have_cache_for_date(self, dt):
"""
Return True if we have cached data for all projects for the specified
datetime. Return False otherwise.
:param dt: datetime to find cache for
:type dt: datetime.datetime
:return: True if we have cache for all projects for this date, False
otherwise
:rtype: bool
""" |
for p in self.projects:
if self.cache.get(p, dt) is None:
return False
return True |
<SYSTEM_TASK:>
Backfill historical data for days that are missing.
<END_TASK>
<USER_TASK:>
Description:
def backfill_history(self, num_days, available_table_names):
"""
Backfill historical data for days that are missing.
:param num_days: number of days of historical data to backfill,
if missing
:type num_days: int
:param available_table_names: names of available per-date tables
:type available_table_names: ``list``
""" |
if num_days == -1:
# skip the first date, under the assumption that data may be
# incomplete
logger.info('Backfilling all available history')
start_table = available_table_names[1]
else:
logger.info('Backfilling %d days of history', num_days)
start_table = available_table_names[-1 * num_days]
start_date = self._datetime_for_table_name(start_table)
end_table = available_table_names[-3]
end_date = self._datetime_for_table_name(end_table)
logger.debug(
'Backfilling history from %s (%s) to %s (%s)', start_table,
start_date.strftime('%Y-%m-%d'), end_table,
end_date.strftime('%Y-%m-%d')
)
for days in range((end_date - start_date).days + 1):
backfill_dt = start_date + timedelta(days=days)
if self._have_cache_for_date(backfill_dt):
logger.info('Cache present for all projects for %s; skipping',
backfill_dt.strftime('%Y-%m-%d'))
continue
backfill_table = self._table_name_for_datetime(backfill_dt)
logger.info('Backfilling %s (%s)', backfill_table,
backfill_dt.strftime('%Y-%m-%d'))
self.query_one_table(backfill_table) |
<SYSTEM_TASK:>
Run the data queries for the specified projects.
<END_TASK>
<USER_TASK:>
Description:
def run_queries(self, backfill_num_days=7):
"""
Run the data queries for the specified projects.
:param backfill_num_days: number of days of historical data to backfill,
if missing
:type backfill_num_days: int
""" |
available_tables = self._get_download_table_ids()
logger.debug('Found %d available download tables: %s',
len(available_tables), available_tables)
today_table = available_tables[-1]
yesterday_table = available_tables[-2]
self.query_one_table(today_table)
self.query_one_table(yesterday_table)
self.backfill_history(backfill_num_days, available_tables) |
<SYSTEM_TASK:>
Generate the HTML for the specified graphs.
<END_TASK>
<USER_TASK:>
Description:
def _generate_html(self):
"""
Generate the HTML for the specified graphs.
        :return: rendered HTML document
        :rtype: str
""" |
logger.debug('Generating templated HTML')
env = Environment(
loader=PackageLoader('pypi_download_stats', 'templates'),
extensions=['jinja2.ext.loopcontrols'])
env.filters['format_date_long'] = filter_format_date_long
env.filters['format_date_ymd'] = filter_format_date_ymd
env.filters['data_columns'] = filter_data_columns
template = env.get_template('base.html')
logger.debug('Rendering template')
html = template.render(
project=self.project_name,
cache_date=self._stats.as_of_datetime,
user=getuser(),
host=platform_node(),
version=VERSION,
proj_url=PROJECT_URL,
graphs=self._graphs,
graph_keys=self.GRAPH_KEYS,
resources=Resources(mode='inline').render(),
badges=self._badges
)
logger.debug('Template rendered')
return html |
<SYSTEM_TASK:>
Find the per-day average of each series in the data over the last 7
<END_TASK>
<USER_TASK:>
Description:
def _limit_data(self, data):
"""
Find the per-day average of each series in the data over the last 7
days; drop all but the top 10.
:param data: original graph data
:type data: dict
:return: dict containing only the top 10 series, based on average over
the last 7 days.
:rtype: dict
""" |
if len(data.keys()) <= 10:
logger.debug("Data has less than 10 keys; not limiting")
return data
# average last 7 days of each series
avgs = {}
for k in data:
if len(data[k]) <= 7:
vals = data[k]
else:
vals = data[k][-7:]
avgs[k] = sum(vals) / len(vals)
# hold state
final_data = {} # final data dict
other = [] # values for dropped/'other' series
count = 0 # iteration counter
# iterate the sorted averages; either drop or keep
for k in sorted(avgs, key=avgs.get, reverse=True):
if count < 10:
final_data[k] = data[k]
logger.debug("Keeping data series %s (average over last 7 "
"days of data: %d", k, avgs[k])
else:
logger.debug("Adding data series %s to 'other' (average over "
"last 7 days of data: %d", k, avgs[k])
other.append(data[k])
count += 1
# sum up the other data and add to final
final_data['other'] = [sum(series) for series in zip(*other)]
return final_data |
<SYSTEM_TASK:>
Generate a downloads graph; append it to ``self._graphs``.
<END_TASK>
<USER_TASK:>
Description:
def _generate_graph(self, name, title, stats_data, y_name):
"""
Generate a downloads graph; append it to ``self._graphs``.
:param name: HTML name of the graph, also used in ``self.GRAPH_KEYS``
:type name: str
:param title: human-readable title for the graph
:type title: str
:param stats_data: data dict from ``self._stats``
:type stats_data: dict
:param y_name: Y axis metric name
:type y_name: str
""" |
logger.debug('Generating chart data for %s graph', name)
orig_data, labels = self._data_dict_to_bokeh_chart_data(stats_data)
data = self._limit_data(orig_data)
logger.debug('Generating %s graph', name)
script, div = FancyAreaGraph(
name, '%s %s' % (self.project_name, title), data, labels,
y_name).generate_graph()
logger.debug('%s graph generated', name)
self._graphs[name] = {
'title': title,
'script': script,
'div': div,
'raw_data': stats_data
} |
<SYSTEM_TASK:>
Generate all output types and write to disk.
<END_TASK>
<USER_TASK:>
Description:
def generate(self):
"""
Generate all output types and write to disk.
""" |
logger.info('Generating graphs')
self._generate_graph(
'by-version',
'Downloads by Version',
self._stats.per_version_data,
'Version'
)
self._generate_graph(
'by-file-type',
'Downloads by File Type',
self._stats.per_file_type_data,
'File Type'
)
self._generate_graph(
'by-installer',
'Downloads by Installer',
self._stats.per_installer_data,
'Installer'
)
self._generate_graph(
'by-implementation',
'Downloads by Python Implementation/Version',
self._stats.per_implementation_data,
'Implementation/Version'
)
self._generate_graph(
'by-system',
'Downloads by System Type',
self._stats.per_system_data,
'System'
)
self._generate_graph(
'by-country',
'Downloads by Country',
self._stats.per_country_data,
'Country'
)
self._generate_graph(
'by-distro',
'Downloads by Distro',
self._stats.per_distro_data,
'Distro'
)
self._generate_badges()
logger.info('Generating HTML')
html = self._generate_html()
html_path = os.path.join(self.output_dir, 'index.html')
with open(html_path, 'wb') as fh:
fh.write(html.encode('utf-8'))
logger.info('HTML report written to %s', html_path)
logger.info('Writing SVG badges')
for name, svg in self._badges.items():
path = os.path.join(self.output_dir, '%s.svg' % name)
with open(path, 'w') as fh:
fh.write(svg)
logger.info('%s badge written to: %s', name, path) |
<SYSTEM_TASK:>
Get a current DateTime object. By default it is local.
<END_TASK>
<USER_TASK:>
Description:
def now(utc=False, tz=None):
"""
    Get a current DateTime object. By default it is local.
.. code:: python
reusables.now()
# DateTime(2016, 12, 8, 22, 5, 2, 517000)
reusables.now().format("It's {24-hour}:{min}")
# "It's 22:05"
:param utc: bool, default False, UTC time not local
:param tz: TimeZone as specified by the datetime module
:return: reusables.DateTime
""" |
return datetime.datetime.utcnow() if utc else datetime.datetime.now(tz=tz) |
<SYSTEM_TASK:>
Cross platform compatible subprocess with CompletedProcess return.
<END_TASK>
<USER_TASK:>
Description:
def run(command, input=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
timeout=None, copy_local_env=False, **kwargs):
"""
Cross platform compatible subprocess with CompletedProcess return.
    No formatting or encoding is performed on the output of subprocess, so its
    output will appear the same on each version / interpreter as before.
.. code:: python
reusables.run('echo "hello world!', shell=True)
# CPython 3.6
# CompletedProcess(args='echo "hello world!', returncode=0,
# stdout=b'"hello world!\\r\\n', stderr=b'')
#
# PyPy 5.4 (Python 2.7.10)
# CompletedProcess(args='echo "hello world!', returncode=0L,
# stdout='"hello world!\\r\\n')
    Timeout is only usable in Python 3.x, as it was not implemented before then;
    a NotImplementedError will be raised if it is specified on Python 2.x.
    :param command: command to run, str if shell=True otherwise must be list
    :param input: data to send to the process via `communicate`
:param stdout: PIPE or None
:param stderr: PIPE or None
:param timeout: max time to wait for command to complete
:param copy_local_env: Use all current ENV vars in the subprocess as well
:param kwargs: additional arguments to pass to Popen
:return: CompletedProcess class
""" |
if copy_local_env:
# Copy local env first and overwrite with anything manually specified
env = os.environ.copy()
env.update(kwargs.get('env', {}))
else:
env = kwargs.get('env')
if sys.version_info >= (3, 5):
return subprocess.run(command, input=input, stdout=stdout,
stderr=stderr, timeout=timeout, env=env,
**kwargs)
# Created here instead of root level as it should never need to be
# manually created or referenced
class CompletedProcess(object):
"""A backwards compatible near clone of subprocess.CompletedProcess"""
def __init__(self, args, returncode, stdout=None, stderr=None):
self.args = args
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __repr__(self):
args = ['args={0!r}'.format(self.args),
'returncode={0!r}'.format(self.returncode),
'stdout={0!r}'.format(self.stdout) if self.stdout else '',
'stderr={0!r}'.format(self.stderr) if self.stderr else '']
return "{0}({1})".format(type(self).__name__,
', '.join(filter(None, args)))
def check_returncode(self):
if self.returncode:
if python_version < (2, 7):
raise subprocess.CalledProcessError(self.returncode,
self.args)
raise subprocess.CalledProcessError(self.returncode,
self.args,
self.stdout)
proc = subprocess.Popen(command, stdout=stdout, stderr=stderr,
env=env, **kwargs)
if PY3:
out, err = proc.communicate(input=input, timeout=timeout)
else:
if timeout:
raise NotImplementedError("Timeout is only available on Python 3")
out, err = proc.communicate(input=input)
return CompletedProcess(command, proc.returncode, out, err) |
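A minimal usage sketch of the function above; the command and its output are illustrative and shell-dependent.
.. code:: python
    import reusables
    result = reusables.run("echo hello", shell=True)
    result.check_returncode()   # raises CalledProcessError on a non-zero exit
    print(result.stdout)        # e.g. b'hello\n'; exact bytes depend on the shell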
<SYSTEM_TASK:>
Run a set of iterables to a function in a Threaded or MP Pool.
<END_TASK>
<USER_TASK:>
Description:
def run_in_pool(target, iterable, threaded=True, processes=4,
asynchronous=False, target_kwargs=None):
""" Run a set of iterables to a function in a Threaded or MP Pool.
    .. code:: python
        def func(a):
            return a + a
        reusables.run_in_pool(func, [1, 2, 3, 4, 5])
        # [2, 4, 6, 8, 10]
:param target: function to run
:param iterable: positional arg to pass to function
:param threaded: Threaded if True multiprocessed if False
:param processes: Number of workers
:param asynchronous: will do map_async if True
:param target_kwargs: Keyword arguments to set on the function as a partial
:return: pool results
""" |
my_pool = pool.ThreadPool if threaded else pool.Pool
if target_kwargs:
        target = partial(target, **target_kwargs)
p = my_pool(processes)
try:
results = (p.map_async(target, iterable) if asynchronous
else p.map(target, iterable))
finally:
p.close()
p.join()
return results |
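An illustrative sketch of passing extra keyword arguments through target_kwargs; the power function is a made-up example.
.. code:: python
    import reusables
    def power(base, exponent=2):
        return base ** exponent
    reusables.run_in_pool(power, [1, 2, 3, 4], target_kwargs={"exponent": 3})
    # [1, 8, 27, 64]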
<SYSTEM_TASK:>
View a dictionary as a tree.
<END_TASK>
<USER_TASK:>
Description:
def tree_view(dictionary, level=0, sep="| "):
"""
View a dictionary as a tree.
""" |
return "".join(["{0}{1}\n{2}".format(sep * level, k,
tree_view(v, level + 1, sep=sep) if isinstance(v, dict)
else "") for k, v in dictionary.items()]) |
<SYSTEM_TASK:>
Turn the Namespace and sub Namespaces back into a native
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self, in_dict=None):
"""
Turn the Namespace and sub Namespaces back into a native
python dictionary.
:param in_dict: Do not use, for self recursion
:return: python dictionary of this Namespace
""" |
in_dict = in_dict if in_dict else self
out_dict = dict()
for k, v in in_dict.items():
if isinstance(v, Namespace):
v = v.to_dict()
out_dict[k] = v
return out_dict |
<SYSTEM_TASK:>
Return value of key as a list
<END_TASK>
<USER_TASK:>
Description:
def list(self, item, default=None, spliter=",", strip=True, mod=None):
""" Return value of key as a list
:param item: key of value to transform
:param mod: function to map against list
:param default: value to return if item does not exist
:param spliter: character to split str on
        :param strip: strip whitespace (and surrounding brackets) from each item
:return: list of items
""" |
try:
item = self.__getattr__(item)
except AttributeError as err:
if default is not None:
return default
raise err
if strip:
item = item.lstrip("[").rstrip("]")
out = [x.strip() if strip else x for x in item.split(spliter)]
if mod:
return list(map(mod, out))
return out |
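An illustrative use of the method, assuming cfg is a config Namespace whose ports value is the string "8080, 8081, 8082".
.. code:: python
    cfg.list("ports", mod=int)
    # [8080, 8081, 8082]
    cfg.list("missing_key", default=[])
    # []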
<SYSTEM_TASK:>
Download a given URL to either file or memory
<END_TASK>
<USER_TASK:>
Description:
def download(url, save_to_file=True, save_dir=".", filename=None,
block_size=64000, overwrite=False, quiet=False):
"""
Download a given URL to either file or memory
:param url: Full url (with protocol) of path to download
:param save_to_file: boolean if it should be saved to file or not
:param save_dir: location of saved file, default is current working dir
:param filename: filename to save as
:param block_size: download chunk size
:param overwrite: overwrite file if it already exists
:param quiet: boolean to turn off logging for function
:return: save location (or content if not saved to file)
""" |
if save_to_file:
if not filename:
filename = safe_filename(url.split('/')[-1])
if not filename:
filename = "downloaded_at_{}.file".format(time.time())
save_location = os.path.abspath(os.path.join(save_dir, filename))
if os.path.exists(save_location) and not overwrite:
logger.error("File {0} already exists".format(save_location))
return False
else:
save_location = "memory"
try:
request = urlopen(url)
except ValueError as err:
if not quiet and "unknown url type" in str(err):
logger.error("Please make sure URL is formatted correctly and"
" starts with http:// or other protocol")
raise err
except Exception as err:
if not quiet:
logger.error("Could not download {0} - {1}".format(url, err))
raise err
try:
kb_size = int(request.headers["Content-Length"]) / 1024
except Exception as err:
if not quiet:
logger.debug("Could not determine file size - {0}".format(err))
file_size = "(unknown size)"
else:
file_size = "({0:.1f} {1})".format(*(kb_size, "KB") if kb_size < 9999
else (kb_size / 1024, "MB"))
if not quiet:
logger.info("Downloading {0} {1} to {2}".format(url, file_size,
save_location))
if save_to_file:
with open(save_location, "wb") as f:
while True:
buffer = request.read(block_size)
if not buffer:
break
f.write(buffer)
return save_location
else:
return request.read() |
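A hedged usage sketch; the URLs and destination directory are placeholders.
.. code:: python
    import reusables
    saved = reusables.download("https://example.com/archive.zip",
                               save_dir="/tmp", overwrite=True)
    # returns the absolute path to the saved file on success
    content = reusables.download("https://example.com/page.html",
                                 save_to_file=False)
    # returns the raw bytes instead of writing to disk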
<SYSTEM_TASK:>
Provide a list of IP addresses, uses `socket.getaddrinfo`
<END_TASK>
<USER_TASK:>
Description:
def url_to_ips(url, port=None, ipv6=False, connect_type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP, flags=0):
"""
Provide a list of IP addresses, uses `socket.getaddrinfo`
.. code:: python
reusables.url_to_ips("example.com", ipv6=True)
# ['2606:2800:220:1:248:1893:25c8:1946']
:param url: hostname to resolve to IP addresses
:param port: port to send to getaddrinfo
:param ipv6: Return IPv6 address if True, otherwise IPv4
:param connect_type: defaults to STREAM connection, can be 0 for all
:param proto: defaults to TCP, can be 0 for all
:param flags: additional flags to pass
:return: list of resolved IPs
""" |
try:
results = socket.getaddrinfo(url, port,
(socket.AF_INET if not ipv6
else socket.AF_INET6),
connect_type,
proto,
flags)
except socket.gaierror:
logger.exception("Could not resolve hostname")
return []
return list(set([result[-1][0] for result in results])) |
<SYSTEM_TASK:>
Resolve a hostname based off an IP address.
<END_TASK>
<USER_TASK:>
Description:
def ip_to_url(ip_addr):
"""
Resolve a hostname based off an IP address.
    This is very limited and will probably not return any results if it is a
    shared IP address or an address with improperly set up DNS records.
.. code:: python
reusables.ip_to_url('93.184.216.34') # example.com
# None
reusables.ip_to_url('8.8.8.8')
# 'google-public-dns-a.google.com'
:param ip_addr: IP address to resolve to hostname
:return: string of hostname or None
""" |
try:
return socket.gethostbyaddr(ip_addr)[0]
except (socket.gaierror, socket.herror):
logger.exception("Could not resolve hostname") |
<SYSTEM_TASK:>
Create a background thread for httpd and serve 'forever'
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""Create a background thread for httpd and serve 'forever'""" |
self._process = threading.Thread(target=self._background_runner)
self._process.start() |
<SYSTEM_TASK:>
Returns a set up stream handler to add to a logger.
<END_TASK>
<USER_TASK:>
Description:
def get_stream_handler(stream=sys.stderr, level=logging.INFO,
log_format=log_formats.easy_read):
"""
Returns a set up stream handler to add to a logger.
:param stream: which stream to use, defaults to sys.stderr
:param level: logging level to set handler at
:param log_format: formatter to use
:return: stream handler
""" |
sh = logging.StreamHandler(stream)
sh.setLevel(level)
sh.setFormatter(logging.Formatter(log_format))
return sh |
<SYSTEM_TASK:>
Set up a file handler to add to a logger.
<END_TASK>
<USER_TASK:>
Description:
def get_file_handler(file_path="out.log", level=logging.INFO,
log_format=log_formats.easy_read,
handler=logging.FileHandler,
**handler_kwargs):
"""
Set up a file handler to add to a logger.
:param file_path: file to write the log to, defaults to out.log
:param level: logging level to set handler at
:param log_format: formatter to use
:param handler: logging handler to use, defaults to FileHandler
:param handler_kwargs: options to pass to the handler
:return: handler
""" |
fh = handler(file_path, **handler_kwargs)
fh.setLevel(level)
fh.setFormatter(logging.Formatter(log_format))
return fh |
<SYSTEM_TASK:>
Grabs the specified logger and adds wanted handlers to it. Will
<END_TASK>
<USER_TASK:>
Description:
def setup_logger(module_name=None, level=logging.INFO, stream=sys.stderr,
file_path=None, log_format=log_formats.easy_read,
suppress_warning=True):
"""
Grabs the specified logger and adds wanted handlers to it. Will
default to adding a stream handler.
:param module_name: logger name to use
:param level: logging level to set logger at
:param stream: stream to log to, or None
:param file_path: file path to log to, or None
:param log_format: format to set the handlers to use
:param suppress_warning: add a NullHandler if no other handler is specified
:return: configured logger
""" |
new_logger = logging.getLogger(module_name)
if stream:
new_logger.addHandler(get_stream_handler(stream, level, log_format))
elif not file_path and suppress_warning and not new_logger.handlers:
new_logger.addHandler(logging.NullHandler())
if file_path:
new_logger.addHandler(get_file_handler(file_path, level, log_format))
if level > 0:
new_logger.setLevel(level)
return new_logger |
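A short usage sketch combining the stream and file handlers set up above; the logger name and file path are assumptions.
.. code:: python
    import logging
    import reusables
    log = reusables.setup_logger("my_app", level=logging.DEBUG,
                                 file_path="my_app.log")
    log.info("Logging to stderr and my_app.log")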
<SYSTEM_TASK:>
Adds a newly created stream handler to the specified logger
<END_TASK>
<USER_TASK:>
Description:
def add_stream_handler(logger=None, stream=sys.stderr, level=logging.INFO,
log_format=log_formats.easy_read):
"""
    Adds a newly created stream handler to the specified logger
:param logger: logging name or object to modify, defaults to root logger
:param stream: which stream to use, defaults to sys.stderr
:param level: logging level to set handler at
:param log_format: formatter to use
""" |
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_stream_handler(stream, level, log_format)) |
<SYSTEM_TASK:>
Adds a newly created file handler to the specified logger
<END_TASK>
<USER_TASK:>
Description:
def add_file_handler(logger=None, file_path="out.log", level=logging.INFO,
log_format=log_formats.easy_read):
"""
    Adds a newly created file handler to the specified logger
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: formatter to use
""" |
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_file_handler(file_path, level, log_format)) |
<SYSTEM_TASK:>
Adds a rotating file handler to the specified logger.
<END_TASK>
<USER_TASK:>
Description:
def add_rotating_file_handler(logger=None, file_path="out.log",
level=logging.INFO,
log_format=log_formats.easy_read,
max_bytes=10*sizes.mb, backup_count=5,
**handler_kwargs):
""" Adds a rotating file handler to the specified logger.
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: log formatter
:param max_bytes: Max file size in bytes before rotating
:param backup_count: Number of backup files
:param handler_kwargs: options to pass to the handler
""" |
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_file_handler(file_path, level, log_format,
handler=RotatingFileHandler,
maxBytes=max_bytes,
backupCount=backup_count,
**handler_kwargs)) |
<SYSTEM_TASK:>
Adds a timed rotating file handler to the specified logger.
<END_TASK>
<USER_TASK:>
Description:
def add_timed_rotating_file_handler(logger=None, file_path="out.log",
level=logging.INFO,
log_format=log_formats.easy_read,
when='w0', interval=1, backup_count=5,
**handler_kwargs):
""" Adds a timed rotating file handler to the specified logger.
Defaults to weekly rotation, with 5 backups.
:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: log formatter
    :param when: type of rollover interval, e.g. 'w0' for weekly on Monday
    :param interval: number of `when` units between rollovers
:param backup_count: Number of backup files
:param handler_kwargs: options to pass to the handler
""" |
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.addHandler(get_file_handler(file_path, level, log_format,
handler=TimedRotatingFileHandler,
when=when,
interval=interval,
backupCount=backup_count,
**handler_kwargs)) |
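An illustrative call that rotates the log at midnight and keeps a week of backups; the logger name and path are assumptions.
.. code:: python
    import reusables
    reusables.add_timed_rotating_file_handler("my_app", file_path="my_app.log",
                                              when="midnight", interval=1,
                                              backup_count=7)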
<SYSTEM_TASK:>
Remove only stream handlers from the specified logger
<END_TASK>
<USER_TASK:>
Description:
def remove_stream_handlers(logger=None):
"""
Remove only stream handlers from the specified logger
:param logger: logging name or object to modify, defaults to root logger
""" |
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
new_handlers = []
for handler in logger.handlers:
# FileHandler is a subclass of StreamHandler so
# 'if not a StreamHandler' does not work
if (isinstance(handler, logging.FileHandler) or
isinstance(handler, logging.NullHandler) or
(isinstance(handler, logging.Handler) and not
isinstance(handler, logging.StreamHandler))):
new_handlers.append(handler)
logger.handlers = new_handlers |
<SYSTEM_TASK:>
Remove only file handlers from the specified logger. Will go through
<END_TASK>
<USER_TASK:>
Description:
def remove_file_handlers(logger=None):
"""
Remove only file handlers from the specified logger. Will go through
and close each handler for safety.
:param logger: logging name or object to modify, defaults to root logger
""" |
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
new_handlers = []
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
handler.close()
else:
new_handlers.append(handler)
logger.handlers = new_handlers |
<SYSTEM_TASK:>
Safely remove all handlers from the logger
<END_TASK>
<USER_TASK:>
Description:
def remove_all_handlers(logger=None):
"""
Safely remove all handlers from the logger
:param logger: logging name or object to modify, defaults to root logger
""" |
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
remove_file_handlers(logger)
logger.handlers = [] |
<SYSTEM_TASK:>
Go through the logger and handlers and update their levels to the
<END_TASK>
<USER_TASK:>
Description:
def change_logger_levels(logger=None, level=logging.DEBUG):
"""
Go through the logger and handlers and update their levels to the
one specified.
:param logger: logging name or object to modify, defaults to root logger
:param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error)
""" |
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
logger.setLevel(level)
for handler in logger.handlers:
handler.level = level |
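A small sketch of temporarily raising and then lowering verbosity on an assumed logger name.
.. code:: python
    import logging
    import reusables
    reusables.change_logger_levels("my_app", level=logging.DEBUG)    # verbose
    reusables.change_logger_levels("my_app", level=logging.WARNING)  # quiet again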
<SYSTEM_TASK:>
Find the names of all loggers currently registered
<END_TASK>
<USER_TASK:>
Description:
def get_registered_loggers(hide_children=False, hide_reusables=False):
"""
Find the names of all loggers currently registered
:param hide_children: only return top level logger names
:param hide_reusables: hide the reusables loggers
:return: list of logger names
""" |
return [logger for logger in logging.Logger.manager.loggerDict.keys()
if not (hide_reusables and "reusables" in logger)
and not (hide_children and "." in logger)] |
<SYSTEM_TASK:>
Wrapper. Makes sure the function's return value has not been returned before
<END_TASK>
<USER_TASK:>
Description:
def unique(max_retries=10, wait=0, alt_return="-no_alt_return-",
exception=Exception, error_text=None):
"""
    Wrapper. Makes sure the function's return value has not been returned
    before, otherwise it is run again with the same inputs.
    .. code:: python
import reusables
import random
@reusables.unique(max_retries=100)
def poor_uuid():
return random.randint(0, 10)
print([poor_uuid() for _ in range(10)])
# [8, 9, 6, 3, 0, 7, 2, 5, 4, 10]
print([poor_uuid() for _ in range(100)])
# Exception: No result was unique
Message format options: {func} {args} {kwargs}
:param max_retries: int of number of retries to attempt before failing
:param wait: float of seconds to wait between each try, defaults to 0
    :param exception: Exception type to raise
    :param error_text: text of the exception
    :param alt_return: if specified, an exception is not raised on failure;
     instead the provided value (of any type) will be returned
""" |
def func_wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
msg = (error_text if error_text else
"No result was unique for function '{func}'")
if not error_text:
msg = _add_args(msg, *args, **kwargs)
for i in range(max_retries):
value = func(*args, **kwargs)
if value not in unique_cache[func.__name__]:
unique_cache[func.__name__].append(value)
return value
if wait:
time.sleep(wait)
else:
if alt_return != "-no_alt_return-":
return alt_return
raise exception(msg.format(func=func.__name__,
args=args, kwargs=kwargs))
return wrapper
return func_wrap |
<SYSTEM_TASK:>
Wrapper. Simple wrapper to make sure a function is only run once at a time.
<END_TASK>
<USER_TASK:>
Description:
def lock_it(lock=g_lock):
"""
Wrapper. Simple wrapper to make sure a function is only run once at a time.
    .. code:: python
import reusables
import time
def func_one(_):
time.sleep(5)
@reusables.lock_it()
def func_two(_):
time.sleep(5)
@reusables.time_it(message="test_1 took {0:.2f} seconds")
def test_1():
reusables.run_in_pool(func_one, (1, 2, 3), threaded=True)
@reusables.time_it(message="test_2 took {0:.2f} seconds")
def test_2():
reusables.run_in_pool(func_two, (1, 2, 3), threaded=True)
test_1()
test_2()
# test_1 took 5.04 seconds
# test_2 took 15.07 seconds
:param lock: Which lock to use, uses unique default
""" |
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return wrapper
return func_wrapper |
<SYSTEM_TASK:>
Wrapper. Time the amount of time it takes the execution of the function
<END_TASK>
<USER_TASK:>
Description:
def time_it(log=None, message=None, append=None):
"""
Wrapper. Time the amount of time it takes the execution of the function
and print it.
If log is true, make sure to set the logging level of 'reusables' to INFO
level or lower.
.. code:: python
import time
import reusables
reusables.add_stream_handler('reusables')
@reusables.time_it(log=True, message="{seconds:.2f} seconds")
def test_time(length):
time.sleep(length)
return "slept {0}".format(length)
result = test_time(5)
# 2016-11-09 16:59:39,935 - reusables.wrappers INFO 5.01 seconds
print(result)
# slept 5
Message format options: {func} {seconds} {args} {kwargs}
:param log: log as INFO level instead of printing
:param message: string to format with total time as the only input
:param append: list to append item too
""" |
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
# Can't use nonlocal in 2.x
msg = (message if message else
"Function '{func}' took a total of {seconds} seconds")
if not message:
msg = _add_args(msg, *args, **kwargs)
time_func = (time.perf_counter if python_version >= (3, 3)
else time.time)
start_time = time_func()
try:
return func(*args, **kwargs)
finally:
total_time = time_func() - start_time
time_string = msg.format(func=func.__name__,
seconds=total_time,
args=args, kwargs=kwargs)
if log:
my_logger = logging.getLogger(log) if isinstance(log, str)\
else logger
my_logger.info(time_string)
else:
print(time_string)
if isinstance(append, list):
append.append(total_time)
return wrapper
return func_wrapper |
<SYSTEM_TASK:>
Wrapper. Instead of returning the result of the function, add it to a queue.
<END_TASK>
<USER_TASK:>
Description:
def queue_it(queue=g_queue, **put_args):
"""
Wrapper. Instead of returning the result of the function, add it to a queue.
    .. code:: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into
""" |
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
queue.put(func(*args, **kwargs), **put_args)
return wrapper
return func_wrapper |
<SYSTEM_TASK:>
Wrapper. Log the traceback to any exceptions raised. Possible to raise
<END_TASK>
<USER_TASK:>
Description:
def log_exception(log="reusables", message=None, exceptions=(Exception, ),
level=logging.ERROR, show_traceback=True):
"""
Wrapper. Log the traceback to any exceptions raised. Possible to raise
custom exception.
    .. code:: python
@reusables.log_exception()
def test():
raise Exception("Bad")
# 2016-12-26 12:38:01,381 - reusables ERROR Exception in test - Bad
# Traceback (most recent call last):
# File "<input>", line 1, in <module>
# File "reusables\wrappers.py", line 200, in wrapper
# raise err
# Exception: Bad
Message format options: {func} {err} {args} {kwargs}
:param exceptions: types of exceptions to catch
:param log: log name to use
:param message: message to use in log
:param level: logging level
:param show_traceback: include full traceback or just error message
""" |
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
msg = message if message else "Exception in '{func}': {err}"
if not message:
msg = _add_args(msg, *args, **kwargs)
try:
return func(*args, **kwargs)
except exceptions as err:
my_logger = (logging.getLogger(log) if isinstance(log, str)
else log)
my_logger.log(level, msg.format(func=func.__name__,
err=str(err),
args=args, kwargs=kwargs),
exc_info=show_traceback)
raise err
return wrapper
return func_wrapper |
<SYSTEM_TASK:>
If the function encounters an exception, catch it, and
<END_TASK>
<USER_TASK:>
Description:
def catch_it(exceptions=(Exception, ), default=None, handler=None):
"""
If the function encounters an exception, catch it, and
    return the specified default or send it to a handler function instead.
    .. code:: python
        def handle_error(exception, func, *args, **kwargs):
            print(f"{func.__name__} raised {exception} when called with {args}")
        @reusables.catch_it(handler=handle_error)
        def will_raise(message="Hello"):
raise Exception(message)
:param exceptions: tuple of exceptions to catch
:param default: what to return if the exception is caught
:param handler: function to send exception, func, *args and **kwargs
""" |
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions as err:
if handler:
return handler(err, func, *args, **kwargs)
return default
return wrapper
return func_wrapper |
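A minimal sketch showing the default fallback; the function and values are made up.
.. code:: python
    import reusables
    @reusables.catch_it(exceptions=(ValueError,), default=-1)
    def parse_port(value):
        return int(value)
    parse_port("8080")   # 8080
    parse_port("oops")   # -1 instead of raising ValueError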
<SYSTEM_TASK:>
Retry a function if an exception is raised, or if the handler check returns False.
<END_TASK>
<USER_TASK:>
Description:
def retry_it(exceptions=(Exception, ), tries=10, wait=0, handler=None,
raised_exception=ReusablesError, raised_message=None):
"""
    Retry a function if an exception is raised, or if the `handler` check
    returns False.
Message format options: {func} {args} {kwargs}
:param exceptions: tuple of exceptions to catch
:param tries: number of tries to retry the function
:param wait: time to wait between executions in seconds
:param handler: function to check if output is valid, must return bool
:param raised_exception: default is ReusablesError
:param raised_message: message to pass to raised exception
""" |
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
msg = (raised_message if raised_message
else "Max retries exceeded for function '{func}'")
if not raised_message:
msg = _add_args(msg, *args, **kwargs)
try:
result = func(*args, **kwargs)
except exceptions:
if tries:
if wait:
time.sleep(wait)
return retry_it(exceptions=exceptions, tries=tries-1,
handler=handler,
wait=wait)(func)(*args, **kwargs)
if raised_exception:
exc = raised_exception(msg.format(func=func.__name__,
args=args, kwargs=kwargs))
exc.__cause__ = None
raise exc
else:
if handler:
if not handler(result):
return retry_it(exceptions=exceptions, tries=tries - 1,
handler=handler,
wait=wait)(func)(*args, **kwargs)
return result
return wrapper
return func_wrapper |
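A hedged example of the handler check, which keeps retrying until the wrapped function's output passes validation; the random-based function is purely illustrative.
.. code:: python
    import random
    import reusables
    @reusables.retry_it(tries=20, handler=lambda result: result > 0.5)
    def sample():
        return random.random()
    value = sample()   # re-run until a value above 0.5 comes back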
<SYSTEM_TASK:>
Automatically detect archive type and extract all files to specified path.
<END_TASK>
<USER_TASK:>
Description:
def extract(archive_file, path=".", delete_on_success=False,
enable_rar=False):
"""
Automatically detect archive type and extract all files to specified path.
.. code:: python
import os
os.listdir(".")
# ['test_structure.zip']
reusables.extract("test_structure.zip")
os.listdir(".")
# [ 'test_structure', 'test_structure.zip']
:param archive_file: path to file to extract
:param path: location to extract to
:param delete_on_success: Will delete the original archive if set to True
:param enable_rar: include the rarfile import and extract
:return: path to extracted files
""" |
if not os.path.exists(archive_file) or not os.path.getsize(archive_file):
logger.error("File {0} unextractable".format(archive_file))
raise OSError("File does not exist or has zero size")
arch = None
if zipfile.is_zipfile(archive_file):
logger.debug("File {0} detected as a zip file".format(archive_file))
arch = zipfile.ZipFile(archive_file)
elif tarfile.is_tarfile(archive_file):
logger.debug("File {0} detected as a tar file".format(archive_file))
arch = tarfile.open(archive_file)
elif enable_rar:
import rarfile
if rarfile.is_rarfile(archive_file):
logger.debug("File {0} detected as "
"a rar file".format(archive_file))
arch = rarfile.RarFile(archive_file)
if not arch:
raise TypeError("File is not a known archive")
logger.debug("Extracting files to {0}".format(path))
try:
arch.extractall(path=path)
finally:
arch.close()
if delete_on_success:
logger.debug("Archive {0} will now be deleted".format(archive_file))
os.unlink(archive_file)
return os.path.abspath(path) |
<SYSTEM_TASK:>
Open and load data from a JSON file
<END_TASK>
<USER_TASK:>
Description:
def load_json(json_file, **kwargs):
"""
Open and load data from a JSON file
.. code:: python
reusables.load_json("example.json")
# {u'key_1': u'val_1', u'key_for_dict': {u'sub_dict_key': 8}}
:param json_file: Path to JSON file as string
:param kwargs: Additional arguments for the json.load command
:return: Dictionary
""" |
with open(json_file) as f:
return json.load(f, **kwargs) |
<SYSTEM_TASK:>
Takes a dictionary and saves it to a file as JSON
<END_TASK>
<USER_TASK:>
Description:
def save_json(data, json_file, indent=4, **kwargs):
"""
Takes a dictionary and saves it to a file as JSON
.. code:: python
my_dict = {"key_1": "val_1",
"key_for_dict": {"sub_dict_key": 8}}
reusables.save_json(my_dict,"example.json")
example.json
.. code::
{
"key_1": "val_1",
"key_for_dict": {
"sub_dict_key": 8
}
}
:param data: dictionary to save as JSON
:param json_file: Path to save file location as str
    :param indent: number of spaces to indent the JSON output with
:param kwargs: Additional arguments for the json.dump command
""" |
with open(json_file, "w") as f:
json.dump(data, f, indent=indent, **kwargs) |
<SYSTEM_TASK:>
Return configuration options as a Namespace.
<END_TASK>
<USER_TASK:>
Description:
def config_namespace(config_file=None, auto_find=False,
verify=True, **cfg_options):
"""
Return configuration options as a Namespace.
.. code:: python
reusables.config_namespace(os.path.join("test", "data",
"test_config.ini"))
# <Namespace: {'General': {'example': 'A regul...>
:param config_file: path or paths to the files location
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: Namespace of the config files
""" |
return ConfigNamespace(**config_dict(config_file, auto_find,
verify, **cfg_options)) |
<SYSTEM_TASK:>
Internal function to return walk generator either from os or scandir
<END_TASK>
<USER_TASK:>
Description:
def _walk(directory, enable_scandir=False, **kwargs):
"""
Internal function to return walk generator either from os or scandir
:param directory: directory to traverse
:param enable_scandir: on python < 3.5 enable external scandir package
:param kwargs: arguments to pass to walk function
:return: walk generator
""" |
walk = os.walk
if python_version < (3, 5) and enable_scandir:
import scandir
walk = scandir.walk
return walk(directory, **kwargs) |
<SYSTEM_TASK:>
Return a directory's contents as a dictionary hierarchy.
<END_TASK>
<USER_TASK:>
Description:
def os_tree(directory, enable_scandir=False):
"""
    Return a directory's contents as a dictionary hierarchy.
.. code:: python
reusables.os_tree(".")
# {'doc': {'build': {'doctrees': {},
# 'html': {'_sources': {}, '_static': {}}},
# 'source': {}},
# 'reusables': {'__pycache__': {}},
# 'test': {'__pycache__': {}, 'data': {}}}
:param directory: path to directory to created the tree of.
:param enable_scandir: on python < 3.5 enable external scandir package
:return: dictionary of the directory
""" |
if not os.path.exists(directory):
raise OSError("Directory does not exist")
if not os.path.isdir(directory):
raise OSError("Path is not a directory")
full_list = []
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
full_list.extend([os.path.join(root, d).lstrip(directory) + os.sep
for d in dirs])
tree = {os.path.basename(directory): {}}
for item in full_list:
separated = item.split(os.sep)
is_dir = separated[-1:] == ['']
if is_dir:
separated = separated[:-1]
parent = tree[os.path.basename(directory)]
for index, path in enumerate(separated):
if path in parent:
parent = parent[path]
continue
else:
parent[path] = dict()
parent = parent[path]
return tree |
<SYSTEM_TASK:>
Hash a given file with md5, or any other available algorithm, and return the hex digest.
<END_TASK>
<USER_TASK:>
Description:
def file_hash(path, hash_type="md5", block_size=65536, hex_digest=True):
"""
    Hash a given file with md5, or any other available algorithm, and return
    the hex digest. You can run `hashlib.algorithms_available` to see which
    are available on your system (unless you have an archaic python version,
    you poor soul).
    This function is designed to not be memory intensive.
.. code:: python
        reusables.file_hash("test_structure.zip")
# '61e387de305201a2c915a4f4277d6663'
:param path: location of the file to hash
:param hash_type: string name of the hash to use
:param block_size: amount of bytes to add to hasher at a time
:param hex_digest: returned as hexdigest, false will return digest
:return: file's hash
""" |
hashed = hashlib.new(hash_type)
with open(path, "rb") as infile:
buf = infile.read(block_size)
while len(buf) > 0:
hashed.update(buf)
buf = infile.read(block_size)
return hashed.hexdigest() if hex_digest else hashed.digest() |
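A quick illustrative cross-check of the chunked hashing against hashing the whole file at once with hashlib; the file name is a placeholder.
.. code:: python
    import hashlib
    import reusables
    chunked = reusables.file_hash("archive.zip", hash_type="sha256")
    with open("archive.zip", "rb") as f:
        whole = hashlib.sha256(f.read()).hexdigest()
    assert chunked == whole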
<SYSTEM_TASK:>
Walk through a file directory and return an iterator of files
<END_TASK>
<USER_TASK:>
Description:
def find_files(directory=".", ext=None, name=None,
match_case=False, disable_glob=False, depth=None,
abspath=False, enable_scandir=False):
"""
Walk through a file directory and return an iterator of files
that match requirements. Will autodetect if name has glob as magic
characters.
Note: For the example below, you can use find_files_list to return as a
list, this is simply an easy way to show the output.
.. code:: python
list(reusables.find_files(name="ex", match_case=True))
# ['C:\\example.pdf',
# 'C:\\My_exam_score.txt']
list(reusables.find_files(name="*free*"))
# ['C:\\my_stuff\\Freedom_fight.pdf']
list(reusables.find_files(ext=".pdf"))
# ['C:\\Example.pdf',
# 'C:\\how_to_program.pdf',
# 'C:\\Hunks_and_Chicks.pdf']
list(reusables.find_files(name="*chris*"))
# ['C:\\Christmas_card.docx',
# 'C:\\chris_stuff.zip']
:param directory: Top location to recursively search for matching files
:param ext: Extensions of the file you are looking for
:param name: Part of the file name
:param match_case: If name or ext has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:param abspath: Return files with their absolute paths
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generator of all files in the specified directory
""" |
if ext or not name:
disable_glob = True
if not disable_glob:
disable_glob = not glob.has_magic(name)
if ext and isinstance(ext, str):
ext = [ext]
elif ext and not isinstance(ext, (list, tuple)):
raise TypeError("extension must be either one extension or a list")
if abspath:
directory = os.path.abspath(directory)
starting_depth = directory.count(os.sep)
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
if depth and root.count(os.sep) - starting_depth >= depth:
continue
if not disable_glob:
if match_case:
raise ValueError("Cannot use glob and match case, please "
"either disable glob or not set match_case")
glob_generator = glob.iglob(os.path.join(root, name))
for item in glob_generator:
yield item
continue
for file_name in files:
if ext:
for end in ext:
if file_name.lower().endswith(end.lower() if not
match_case else end):
break
else:
continue
if name:
if match_case and name not in file_name:
continue
elif name.lower() not in file_name.lower():
continue
yield os.path.join(root, file_name) |
<SYSTEM_TASK:>
Remove all empty folders from a path. Returns list of empty directories.
<END_TASK>
<USER_TASK:>
Description:
def remove_empty_directories(root_directory, dry_run=False, ignore_errors=True,
enable_scandir=False):
"""
Remove all empty folders from a path. Returns list of empty directories.
:param root_directory: base directory to start at
:param dry_run: just return a list of what would be removed
:param ignore_errors: Permissions are a pain, just ignore if you blocked
:param enable_scandir: on python < 3.5 enable external scandir package
:return: list of removed directories
""" |
listdir = os.listdir
if python_version < (3, 5) and enable_scandir:
import scandir as _scandir
def listdir(directory):
return list(_scandir.scandir(directory))
directory_list = []
for root, directories, files in _walk(root_directory,
enable_scandir=enable_scandir,
topdown=False):
if (not directories and not files and os.path.exists(root) and
root != root_directory and os.path.isdir(root)):
directory_list.append(root)
if not dry_run:
try:
os.rmdir(root)
except OSError as err:
if ignore_errors:
logger.info("{0} could not be deleted".format(root))
else:
raise err
elif directories and not files:
for directory in directories:
directory = join_paths(root, directory, strict=True)
if (os.path.exists(directory) and os.path.isdir(directory) and
not listdir(directory)):
directory_list.append(directory)
if not dry_run:
try:
os.rmdir(directory)
except OSError as err:
if ignore_errors:
logger.info("{0} could not be deleted".format(
directory))
else:
raise err
return directory_list |
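A cautious usage sketch: preview the candidates with dry_run before deleting anything; the path is an assumption.
.. code:: python
    import reusables
    would_remove = reusables.remove_empty_directories("/tmp/project",
                                                      dry_run=True)
    print(would_remove)   # inspect the list before committing
    removed = reusables.remove_empty_directories("/tmp/project")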
<SYSTEM_TASK:>
Remove all empty files from a path. Returns list of the empty files removed.
<END_TASK>
<USER_TASK:>
Description:
def remove_empty_files(root_directory, dry_run=False, ignore_errors=True,
enable_scandir=False):
"""
Remove all empty files from a path. Returns list of the empty files removed.
:param root_directory: base directory to start at
:param dry_run: just return a list of what would be removed
:param ignore_errors: Permissions are a pain, just ignore if you blocked
:param enable_scandir: on python < 3.5 enable external scandir package
:return: list of removed files
""" |
file_list = []
for root, directories, files in _walk(root_directory,
enable_scandir=enable_scandir):
for file_name in files:
file_path = join_paths(root, file_name, strict=True)
if os.path.isfile(file_path) and not os.path.getsize(file_path):
if file_hash(file_path) == variables.hashes.empty_file.md5:
file_list.append(file_path)
file_list = sorted(set(file_list))
if not dry_run:
for afile in file_list:
try:
os.unlink(afile)
except OSError as err:
if ignore_errors:
logger.info("File {0} could not be deleted".format(afile))
else:
raise err
return file_list |
<SYSTEM_TASK:>
Check a directory for duplicates of the specified file. This is meant
<END_TASK>
<USER_TASK:>
Description:
def dup_finder(file_path, directory=".", enable_scandir=False):
"""
    Check a directory for duplicates of the specified file. This is meant
    for a single file only; to check an entire directory for duplicates, use
    directory_duplicates.
This is designed to be as fast as possible by doing lighter checks
before progressing to
more extensive ones, in order they are:
1. File size
2. First twenty bytes
3. Full SHA256 compare
.. code:: python
list(reusables.dup_finder(
"test_structure\\files_2\\empty_file"))
# ['C:\\Reusables\\test\\data\\fake_dir',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2',
# 'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file']
:param file_path: Path to file to check for duplicates of
:param directory: Directory to dig recursively into to look for duplicates
:param enable_scandir: on python < 3.5 enable external scandir package
    :return: generator of paths to duplicate files
""" |
size = os.path.getsize(file_path)
if size == 0:
for empty_file in remove_empty_files(directory, dry_run=True):
yield empty_file
else:
with open(file_path, 'rb') as f:
first_twenty = f.read(20)
file_sha256 = file_hash(file_path, "sha256")
for root, directories, files in _walk(directory,
enable_scandir=enable_scandir):
for each_file in files:
test_file = os.path.join(root, each_file)
if os.path.getsize(test_file) == size:
try:
with open(test_file, 'rb') as f:
test_first_twenty = f.read(20)
except OSError:
logger.warning("Could not open file to compare - "
"{0}".format(test_file))
else:
if first_twenty == test_first_twenty:
if file_hash(test_file, "sha256") == file_sha256:
yield os.path.abspath(test_file) |