<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_ns_run_logls(run, dup_assert=False, dup_warn=False): """Check run logls are unique and in the correct order. Parameters run: dict Nested sampling run to check. dup_assert: bool, optional Whether to raise an AssertionError if there are duplicate logl values. dup_warn: bool, optional Whether to give a UserWarning if there are duplicate logl values (only used if dup_assert is False). Raises ------ AssertionError If run does not have expected properties. """
# Check the logl values are in non-decreasing order
assert np.array_equal(run['logl'], run['logl'][np.argsort(run['logl'])])
if dup_assert or dup_warn:
    unique_logls, counts = np.unique(run['logl'], return_counts=True)
    repeat_logls = run['logl'].shape[0] - unique_logls.shape[0]
    msg = ('{} duplicate logl values (out of a total of {}). This may be '
           'caused by limited numerical precision in the output files.'
           '\nrepeated logls = {}\ncounts = {}\npositions in list of {}'
           ' unique logls = {}').format(
               repeat_logls, run['logl'].shape[0],
               unique_logls[counts != 1], counts[counts != 1],
               unique_logls.shape[0], np.where(counts != 1)[0])
    if dup_assert:
        assert repeat_logls == 0, msg
    elif dup_warn:
        if repeat_logls != 0:
            warnings.warn(msg, UserWarning)
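A minimal sketch of exercising this check on a toy run dict (assumes numpy is imported as np and the warnings module is available, as in the function body above):

import numpy as np

run = {'logl': np.array([-10.0, -5.0, -5.0, -1.0])}  # sorted, one duplicate
check_ns_run_logls(run)                  # passes: ordering is correct
check_ns_run_logls(run, dup_warn=True)   # emits a UserWarning about the duplicate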
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_ns_run_threads(run): """Check thread labels and thread_min_max have expected properties. Parameters run: dict Nested sampling run to check. Raises ------ AssertionError If run does not have expected properties. """
assert run['thread_labels'].dtype == int
uniq_th = np.unique(run['thread_labels'])
assert np.array_equal(
    np.asarray(range(run['thread_min_max'].shape[0])), uniq_th), \
    str(uniq_th)
# Check thread_min_max
assert np.any(run['thread_min_max'][:, 0] == -np.inf), (
    'Run should have at least one thread which starts by sampling the '
    + 'whole prior')
for th_lab in uniq_th:
    inds = np.where(run['thread_labels'] == th_lab)[0]
    th_info = 'thread label={}, first_logl={}, thread_min_max={}'.format(
        th_lab, run['logl'][inds[0]], run['thread_min_max'][th_lab, :])
    assert run['thread_min_max'][th_lab, 0] <= run['logl'][inds[0]], (
        'First point in thread has logl less than thread min logl! '
        + th_info + ', difference={}'.format(
            run['logl'][inds[0]] - run['thread_min_max'][th_lab, 0]))
    assert run['thread_min_max'][th_lab, 1] == run['logl'][inds[-1]], (
        'Last point in thread logl != thread end logl! ' + th_info)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count_samples(ns_run, **kwargs): r"""Number of samples in run. Unlike most estimators this does not require log weights, but for convenience will not throw an error if they are specified. Parameters ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). Returns ------- int """
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
    raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
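An illustrative call on a hypothetical run dict with 500 samples (assumes numpy imported as np):

import numpy as np

run = {'logl': np.zeros(500)}
count_samples(run)  # -> 500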
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_latex_name(func_in, **kwargs): """ Produce a LaTeX formatted name for each function for use in labelling results. Parameters func_in: function kwargs: dict, optional Kwargs for function. Returns ------- latex_name: str LaTeX formatted name for the function. """
if isinstance(func_in, functools.partial):
    func = func_in.func
    assert not set(func_in.keywords) & set(kwargs), (
        'kwargs={0} and func_in.keywords={1} contain repeated keys'
        .format(kwargs, func_in.keywords))
    kwargs.update(func_in.keywords)
else:
    func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
    raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
    'count_samples': r'samples',
    'logz': r'$\mathrm{log} \mathcal{Z}$',
    'evidence': r'$\mathcal{Z}$',
    'r_mean': r'$\overline{|\theta|}$',
    'param_mean': r'$\overline{\theta_' + ind_str + '}$',
    'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
    cred_str = r'$\mathrm{median}('
else:
    # format percent without trailing zeros
    percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
    cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
    return latex_name_dict[func.__name__]
except KeyError as err:
    err.args = err.args + ('get_latex_name not yet set up for '
                           + func.__name__,)
    raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def weighted_quantile(probability, values, weights): """ Get quantile estimate for input probability given weighted samples using linear interpolation. Parameters probability: float Quantile to estimate - must be in open interval (0, 1). For example, use 0.5 for the median and 0.84 for the upper 84% quantile. values: 1d numpy array Sample values. weights: 1d numpy array Corresponding sample weights (same shape as values). Returns ------- quantile: float """
assert 1 > probability > 0, (
    'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
# Midpoint cumulative weights give each sample's quantile position
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
# Linearly interpolate between the weighted sample quantiles
return np.interp(probability, quantiles, values[sorted_inds])
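A small usage sketch (the values and weights are made up for illustration):

import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.array([1.0, 1.0, 1.0, 1.0])
weighted_quantile(0.5, values, weights)  # weighted median; 2.5 for equal weights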
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_list_error_values(run_list, estimator_list, estimator_names, n_simulate=100, **kwargs): """Gets a data frame with calculation values and error diagnostics for each run in the input run list. NB when parallelised the results will not be produced in order (so results from some run number will not necessarily correspond to that number run in run_list). Parameters run_list: list of dicts List of nested sampling run dicts. estimator_list: list of functions Estimators to apply to runs. estimator_names: list of strs Name of each func in estimator_list. n_simulate: int, optional Number of bootstrap replications to use on each run. thread_pvalue: bool, optional Whether or not to compute the KS test diagnostic for correlations between threads within a run. bs_stat_dist: bool, optional Whether or not to compute the statistical distance between bootstrap error distributions diagnostic. parallel: bool, optional Whether or not to parallelise - see parallel_utils.parallel_apply. save_name: str or None, optional See nestcheck.io_utils.save_load_result. save: bool, optional See nestcheck.io_utils.save_load_result. load: bool, optional See nestcheck.io_utils.save_load_result. overwrite_existing: bool, optional See nestcheck.io_utils.save_load_result. Returns ------- df: pandas DataFrame Results table showing calculation values and diagnostics. Rows show different runs (or pairs of runs for pairwise comparisons). Columns have titles given by estimator_names and show results for the different functions in estimator_list. """
thread_pvalue = kwargs.pop('thread_pvalue', False)
bs_stat_dist = kwargs.pop('bs_stat_dist', False)
parallel = kwargs.pop('parallel', True)
if kwargs:
    raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
assert len(estimator_list) == len(estimator_names), (
    'len(estimator_list) = {0} != len(estimator_names) = {1}'
    .format(len(estimator_list), len(estimator_names)))
# Calculation results
# -------------------
df = estimator_values_df(run_list, estimator_list, parallel=parallel,
                         estimator_names=estimator_names)
df.index = df.index.map(str)
df['calculation type'] = 'values'
df.set_index('calculation type', drop=True, append=True, inplace=True)
df = df.reorder_levels(['calculation type', 'run'])
# Bootstrap stds
# --------------
# Create bs_vals_df then convert to stds so bs_vals_df does not need to be
# recomputed if bs_stat_dist is True
bs_vals_df = bs_values_df(run_list, estimator_list, estimator_names,
                          n_simulate, parallel=parallel)
bs_std_df = bs_vals_df.applymap(lambda x: np.std(x, ddof=1))
bs_std_df.index.name = 'run'
bs_std_df['calculation type'] = 'bootstrap std'
bs_std_df.set_index('calculation type', drop=True, append=True,
                    inplace=True)
bs_std_df = bs_std_df.reorder_levels(['calculation type', 'run'])
df = pd.concat([df, bs_std_df])
# Pairwise KS p-values on threads
# -------------------------------
if thread_pvalue:
    t_vals_df = thread_values_df(
        run_list, estimator_list, estimator_names, parallel=parallel)
    t_d_df = pairwise_dists_on_cols(t_vals_df, earth_mover_dist=False,
                                    energy_dist=False)
    # Keep only the p value not the distance measures
    t_d_df = t_d_df.xs('ks pvalue', level='calculation type',
                       drop_level=False)
    # Prepend 'thread ' to the calculation type
    t_d_df.index.set_levels(['thread ks pvalue'],
                            level='calculation type', inplace=True)
    df = pd.concat([df, t_d_df])
# Pairwise distances on BS distributions
# --------------------------------------
if bs_stat_dist:
    b_d_df = pairwise_dists_on_cols(bs_vals_df)
    # Select only statistical distances - not KS pvalue as this is not
    # useful for the bootstrap resample distributions (see Higson et al.
    # 2019 for more details).
    dists = ['ks distance', 'earth mover distance', 'energy distance']
    b_d_df = b_d_df.loc[pd.IndexSlice[dists, :], :]
    # Prepend 'bootstrap ' to the calculation type
    new_ind = ['bootstrap '
               + b_d_df.index.get_level_values('calculation type'),
               b_d_df.index.get_level_values('run')]
    b_d_df.set_index(new_ind, inplace=True)
    df = pd.concat([df, b_d_df])
return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def estimator_values_df(run_list, estimator_list, **kwargs): """Get a dataframe of estimator values. NB when parallelised the results will not be produced in order (so results from some run number will not necessarily correspond to that number run in run_list). Parameters run_list: list of dicts List of nested sampling run dicts. estimator_list: list of functions Estimators to apply to runs. estimator_names: list of strs, optional Name of each func in estimator_list. parallel: bool, optional Whether or not to parallelise - see parallel_utils.parallel_apply. save_name: str or None, optional See nestcheck.io_utils.save_load_result. save: bool, optional See nestcheck.io_utils.save_load_result. load: bool, optional See nestcheck.io_utils.save_load_result. overwrite_existing: bool, optional See nestcheck.io_utils.save_load_result. Returns ------- df: pandas DataFrame Results table showing calculation values and diagnostics. Rows show different runs. Columns have titles given by estimator_names and show results for the different functions in estimator_list. """
estimator_names = kwargs.pop(
    'estimator_names',
    ['est_' + str(i) for i in range(len(estimator_list))])
parallel = kwargs.pop('parallel', True)
if kwargs:
    raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
values_list = pu.parallel_apply(
    nestcheck.ns_run_utils.run_estimators, run_list,
    func_args=(estimator_list,), parallel=parallel)
df = pd.DataFrame(np.stack(values_list, axis=0))
df.columns = estimator_names
df.index.name = 'run'
return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def error_values_summary(error_values, **summary_df_kwargs): """Get summary statistics about calculation errors, including estimated implementation errors. Parameters error_values: pandas DataFrame Of format output by run_list_error_values (look at it for more details). summary_df_kwargs: dict, optional See pandas_functions.summary_df docstring for more details. Returns ------- df: pandas DataFrame Table showing means and standard deviations of results and diagnostics for the different runs. Also contains estimated numerical uncertainties on results. """
df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)
# get implementation stds
imp_std, imp_std_unc, imp_frac, imp_frac_unc = \
    nestcheck.error_analysis.implementation_std(
        df.loc[('values std', 'value')],
        df.loc[('values std', 'uncertainty')],
        df.loc[('bootstrap std mean', 'value')],
        df.loc[('bootstrap std mean', 'uncertainty')])
df.loc[('implementation std', 'value'), df.columns] = imp_std
df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc
df.loc[('implementation std frac', 'value'), :] = imp_frac
df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc
# Get implementation RMSEs (calculated using the values RMSE instead of
# values std)
if 'values rmse' in set(df.index.get_level_values('calculation type')):
    imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = \
        nestcheck.error_analysis.implementation_std(
            df.loc[('values rmse', 'value')],
            df.loc[('values rmse', 'uncertainty')],
            df.loc[('bootstrap std mean', 'value')],
            df.loc[('bootstrap std mean', 'uncertainty')])
    df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse
    df.loc[('implementation rmse', 'uncertainty'), df.columns] = \
        imp_rmse_unc
    df.loc[('implementation rmse frac', 'value'), :] = imp_frac
    df.loc[('implementation rmse frac', 'uncertainty'), :] = imp_frac_unc
# Return only the calculation types we are interested in, in order
calcs_to_keep = ['true values', 'values mean', 'values std',
                 'values rmse', 'bootstrap std mean',
                 'implementation std', 'implementation std frac',
                 'implementation rmse', 'implementation rmse frac',
                 'thread ks pvalue mean', 'bootstrap ks distance mean',
                 'bootstrap energy distance mean',
                 'bootstrap earth mover distance mean']
df = pd.concat([df.xs(calc, level='calculation type', drop_level=False)
                for calc in calcs_to_keep if calc in
                df.index.get_level_values('calculation type')])
return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_list_error_summary(run_list, estimator_list, estimator_names, n_simulate, **kwargs): """Wrapper which runs run_list_error_values then applies error_values_summary to the resulting dataframe. See the docstrings for those two functions for more details and for descriptions of parameters and output. """
true_values = kwargs.pop('true_values', None)
include_true_values = kwargs.pop('include_true_values', False)
include_rmse = kwargs.pop('include_rmse', False)
error_values = run_list_error_values(run_list, estimator_list,
                                     estimator_names, n_simulate, **kwargs)
return error_values_summary(error_values, true_values=true_values,
                            include_true_values=include_true_values,
                            include_rmse=include_rmse)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bs_values_df(run_list, estimator_list, estimator_names, n_simulate, **kwargs): """Computes a data frame of bootstrap resampled values. Parameters run_list: list of dicts List of nested sampling run dicts. estimator_list: list of functions Estimators to apply to runs. estimator_names: list of strs Name of each func in estimator_list. n_simulate: int Number of bootstrap replications to use on each run. kwargs: Kwargs to pass to parallel_apply. Returns ------- bs_values_df: pandas data frame Columns represent estimators and rows represent runs. Each cell contains a 1d array of bootstrap resampled values for the run and estimator. """
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
assert len(estimator_list) == len(estimator_names), (
    'len(estimator_list) = {0} != len(estimator_names) = {1}'
    .format(len(estimator_list), len(estimator_names)))
bs_values_list = pu.parallel_apply(
    nestcheck.error_analysis.run_bootstrap_values, run_list,
    func_args=(estimator_list,), func_kwargs={'n_simulate': n_simulate},
    tqdm_kwargs=tqdm_kwargs, **kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
    df[name] = [arr[i, :] for arr in bs_values_list]
# Check there are the correct number of bootstrap replications in each cell
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
    assert vals_shape == (n_simulate,), (
        'Should be n_simulate=' + str(n_simulate) + ' values in '
        + 'each cell. The cell contains array with shape '
        + str(vals_shape))
return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def thread_values_df(run_list, estimator_list, estimator_names, **kwargs): """Calculates estimator values for the constituent threads of the input runs. Parameters run_list: list of dicts List of nested sampling run dicts. estimator_list: list of functions Estimators to apply to runs. estimator_names: list of strs Name of each func in estimator_list. kwargs: Kwargs to pass to parallel_apply. Returns ------- df: pandas data frame Columns represent estimators and rows represent runs. Each cell contains a 1d numpy array with length equal to the number of threads in the run, containing the results from evaluating the estimator on each thread. """
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'thread values'})
assert len(estimator_list) == len(estimator_names), (
    'len(estimator_list) = {0} != len(estimator_names) = {1}'
    .format(len(estimator_list), len(estimator_names)))
# get thread results
thread_vals_arrays = pu.parallel_apply(
    nestcheck.error_analysis.run_thread_values, run_list,
    func_args=(estimator_list,), tqdm_kwargs=tqdm_kwargs, **kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
    df[name] = [arr[i, :] for arr in thread_vals_arrays]
# Check there are the correct number of thread values in each cell
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
    assert vals_shape == (run_list[0]['thread_min_max'].shape[0],), \
        ('Should be nlive=' + str(run_list[0]['thread_min_max'].shape[0])
         + ' values in each cell. The cell contains array with shape '
         + str(vals_shape))
return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pairwise_dists_on_cols(df_in, earth_mover_dist=True, energy_dist=True): """Computes pairwise statistical distance measures. Parameters df_in: pandas data frame Columns represent estimators and rows represent runs. Each data frame element is an array of values which are used as samples in the distance measures. earth_mover_dist: bool, optional Passed to error_analysis.pairwise_distances. energy_dist: bool, optional Passed to error_analysis.pairwise_distances. Returns ------- df: pandas data frame with distance values for each pair. """
df = pd.DataFrame()
for col in df_in.columns:
    df[col] = nestcheck.error_analysis.pairwise_distances(
        df_in[col].values, earth_mover_dist=earth_mover_dist,
        energy_dist=energy_dist)
return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _backtick_columns(cols): """ Quote the column names """
def bt(s):
    b = '' if s == '*' or not s else '`'
    return [_ for _ in [b + (s or '') + b] if _]

formatted = []
for c in cols:
    if c[0] == '#':
        formatted.append(c[1:])
    elif c.startswith('(') and c.endswith(')'):
        # WHERE (column_a, column_b) IN ((1,10), (1,20))
        formatted.append(c)
    else:
        # backtick the former part when it meets the first dot, and then
        # all the rest
        formatted.append('.'.join(bt(c.split('.')[0])
                                  + bt('.'.join(c.split('.')[1:]))))
return ', '.join(formatted)
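Illustrative inputs covering each branch above (a sketch; the column names are made up):

_backtick_columns(['a.b', '*', '#COUNT(*)', '(col_a, col_b)'])
# -> '`a`.`b`, *, COUNT(*), (col_a, col_b)'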
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _by_columns(self, columns): """ Allow select.group and select.order to accept either a string or a list """
return columns if self.isstr(columns) else self._backtick_columns(columns)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_whitespace(txt): """ Returns a tuple containing the whitespace to the left and right of a string as its two elements """
# if the entire parameter is whitespace
rall = re.search(r'^([\s])+$', txt)
if rall:
    tmp = txt.split('\n', 1)
    if len(tmp) == 2:
        return (tmp[0], '\n' + tmp[1])  # left, right
    else:
        return ('', tmp[0])  # left, right
left = ''
# find whitespace to the left of the parameter
rlm = re.search(r'^([\s])+', txt)
if rlm:
    left = rlm.group(0)
right = ''
# find whitespace to the right of the parameter
rrm = re.search(r'([\s])+$', txt)
if rrm:
    right = rrm.group(0)
return (left, right)
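Example behaviour for each branch (purely illustrative inputs):

get_whitespace('  value ')  # -> ('  ', ' ')
get_whitespace('\n')        # -> ('', '\n')   entire string is whitespace
get_whitespace('value')     # -> ('', '')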
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_whitespace_pattern(self): """ Try to find a whitespace pattern in the existing parameters to be applied to a newly added parameter """
name_ws = []
value_ws = []
for entry in self._entries:
    name_ws.append(get_whitespace(entry.name))
    if entry.value != '':
        value_ws.append(get_whitespace(entry._value))  # _value is unstripped
if len(value_ws) >= 1:
    value_ws = most_common(value_ws)
else:
    value_ws = ('', ' ')
if len(name_ws) >= 1:
    name_ws = most_common(name_ws)
else:
    name_ws = (' ', '')
return name_ws, value_ws
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _path_for_file(self, project_name, date): """ Generate the path on disk for a specified project and date. :param project_name: the PyPI project name for the data :type project_name: str :param date: the date for the data :type date: datetime.datetime :return: path for where to store this data on disk :rtype: str """
return os.path.join( self.cache_path, '%s_%s.json' % (project_name, date.strftime('%Y%m%d')) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, project, date): """ Get the cache data for a specified project for the specified date. Returns None if the data cannot be found in the cache. :param project: PyPI project name to get data for :type project: str :param date: date to get data for :type date: datetime.datetime :return: dict of per-date data for project :rtype: :py:obj:`dict` or ``None`` """
fpath = self._path_for_file(project, date)
logger.debug('Cache GET project=%s date=%s - path=%s',
             project, date.strftime('%Y-%m-%d'), fpath)
try:
    with open(fpath, 'r') as fh:
        data = json.loads(fh.read())
except Exception:
    logger.debug('Error getting from cache for project=%s date=%s',
                 project, date.strftime('%Y-%m-%d'))
    return None
data['cache_metadata']['date'] = datetime.strptime(
    data['cache_metadata']['date'], '%Y%m%d'
)
data['cache_metadata']['updated'] = datetime.fromtimestamp(
    data['cache_metadata']['updated']
)
return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(self, project, date, data, data_ts): """ Set the cache data for a specified project for the specified date. :param project: project name to set data for :type project: str :param date: date to set data for :type date: datetime.datetime :param data: data to cache :type data: dict :param data_ts: maximum timestamp in the BigQuery data table :type data_ts: int """
data['cache_metadata'] = {
    'project': project,
    'date': date.strftime('%Y%m%d'),
    'updated': time.time(),
    'version': VERSION,
    'data_ts': data_ts
}
fpath = self._path_for_file(project, date)
logger.debug('Cache SET project=%s date=%s - path=%s',
             project, date.strftime('%Y-%m-%d'), fpath)
with open(fpath, 'w') as fh:
    fh.write(json.dumps(data))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_dates_for_project(self, project): """ Return a list of the dates we have in cache for the specified project, sorted in ascending date order. :param project: project name :type project: str :return: list of datetime.datetime objects :rtype: ``list`` of datetime.datetime """
file_re = re.compile(r'^%s_([0-9]{8})\.json$' % project)
all_dates = []
for f in os.listdir(self.cache_path):
    if not os.path.isfile(os.path.join(self.cache_path, f)):
        continue
    m = file_re.match(f)
    if m is None:
        continue
    all_dates.append(datetime.strptime(m.group(1), '%Y%m%d'))
return sorted(all_dates)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_log_level_format(level, format): """ Set logger level and format. :param level: logging level; see the :py:mod:`logging` constants. :type level: int :param format: logging formatter format string :type format: str """
formatter = logging.Formatter(fmt=format)
logger.handlers[0].setFormatter(formatter)
logger.setLevel(level)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _pypi_get_projects_for_user(username): """ Given the username of a PyPI user, return a list of all of the user's projects from the XMLRPC interface. See: https://wiki.python.org/moin/PyPIXmlRpc :param username: PyPI username :type username: str :return: list of string project names :rtype: ``list`` """
client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
pkgs = client.user_packages(username)  # returns [role, package]
return [x[1] for x in pkgs]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_graph(self): """ Generate the graph; return a 2-tuple of strings, script to place in the head of the HTML document and div content for the graph itself. :return: 2-tuple (script, div) :rtype: tuple """
logger.debug('Generating graph for %s', self._graph_id)
# tools to use
tools = [
    PanTool(),
    BoxZoomTool(),
    WheelZoomTool(),
    SaveTool(),
    ResetTool(),
    ResizeTool()
]
# generate the stacked area graph
try:
    g = Area(
        self._data, x='Date', y=self._y_series_names,
        title=self._title, stack=True, xlabel='Date',
        ylabel='Downloads', tools=tools,
        # note the width and height will be set by JavaScript
        plot_height=400, plot_width=800,
        toolbar_location='above', legend=False
    )
except Exception as ex:
    logger.error("Error generating %s graph", self._graph_id)
    logger.error("Data: %s", self._data)
    logger.error("y=%s", self._y_series_names)
    raise ex
lines = []
legend_parts = []
# add a line at the top of each Patch (stacked area) for hovertool
for renderer in g.select(GlyphRenderer):
    if not isinstance(renderer.glyph, Patches):
        continue
    series_name = renderer.data_source.data['series'][0]
    logger.debug('Adding line for Patches %s (series: %s)', renderer,
                 series_name)
    line = self._line_for_patches(self._data, g, renderer, series_name)
    if line is not None:
        lines.append(line)
        legend_parts.append((series_name, [line]))
# add the Hovertool, specifying only our line glyphs
g.add_tools(
    HoverTool(
        tooltips=[
            (self._y_name, '@SeriesName'),
            ('Date', '@FmtDate'),
            ('Downloads', '@Downloads'),
        ],
        renderers=lines,
        line_policy='nearest'
    )
)
# legend outside chart area
legend = Legend(legends=legend_parts, location=(0, 0))
g.add_layout(legend, 'right')
return components(g)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _line_for_patches(self, data, chart, renderer, series_name): """ Add a line along the top edge of a Patch in a stacked Area Chart; return the new Glyph for addition to HoverTool. :param data: original data for the graph :type data: dict :param chart: Chart to add the line to :type chart: bokeh.charts.Chart :param renderer: GlyphRenderer containing one Patches glyph, to draw the line for :type renderer: bokeh.models.renderers.GlyphRenderer :param series_name: the data series name this Patches represents :type series_name: str :return: GlyphRenderer for a Line at the top edge of this Patch :rtype: bokeh.models.renderers.GlyphRenderer """
# @TODO this method needs a major refactor
# get the original x and y values, and color
xvals = deepcopy(renderer.data_source.data['x_values'][0])
yvals = deepcopy(renderer.data_source.data['y_values'][0])
line_color = renderer.glyph.fill_color
# save original values for logging if needed
orig_xvals = [x for x in xvals]
orig_yvals = [y for y in yvals]
# get a list of the values
new_xvals = [x for x in xvals]
new_yvals = [y for y in yvals]
# so when a Patch is made, the first point is (0,0); trash it
xvals = new_xvals[1:]
yvals = new_yvals[1:]
# then, we can tell the last point in the "top" line because it will be
# followed by a point with the same x value and a y value of 0.
last_idx = None
for idx, val in enumerate(xvals):
    if yvals[idx+1] == 0 and xvals[idx+1] == xvals[idx]:
        last_idx = idx
        break
if last_idx is None:
    logger.error('Unable to find top line of patch (x_values=%s '
                 'y_values=%s', orig_xvals, orig_yvals)
    return None
# truncate our values to just what makes up the top line
xvals = xvals[:last_idx+1]
yvals = yvals[:last_idx+1]
# Currently (bokeh 0.12.1) HoverTool won't show the tooltip for the last
# point in our line. As a hack for this, add a point with the same Y
# value and an X slightly before it.
lastx = xvals[-1]
xvals[-1] = lastx - 1000  # 1000 nanoseconds
xvals.append(lastx)
yvals.append(yvals[-1])
# get the actual download counts from the original data
download_counts = [
    data[series_name][y] for y in range(0, len(yvals) - 1)
]
download_counts.append(download_counts[-1])
# create a ColumnDataSource for the new overlay line
data2 = {
    'x': xvals,  # Date/x values are numpy.datetime64
    'y': yvals,
    # the following are hacks for data that we want in the HoverTool
    # tooltip
    'SeriesName': [series_name for _ in yvals],
    # formatted date
    'FmtDate': [self.datetime64_to_formatted_date(x) for x in xvals],
    # to show the exact value, not where the pointer is
    'Downloads': download_counts
}
# set the formatted date for our hacked second-to-last point to the
# same value as the last point
data2['FmtDate'][-2] = data2['FmtDate'][-1]
# create the ColumnDataSource, then the line for it, then the Glyph
line_ds = ColumnDataSource(data2)
line = Line(x='x', y='y', line_color=line_color)
lineglyph = chart.add_glyph(line_ds, line)
return lineglyph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_empty_cache_record(self, rec): """ Return True if the specified cache record has no data, False otherwise. :param rec: cache record returned by :py:meth:`~._cache_get` :type rec: dict :return: True if record is empty, False otherwise :rtype: bool """
# these are taken from DataQuery.query_one_table()
for k in [
    'by_version',
    'by_file_type',
    'by_installer',
    'by_implementation',
    'by_system',
    'by_distro',
    'by_country'
]:
    if k in rec and len(rec[k]) > 0:
        return False
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _cache_get(self, date): """ Return cache data for the specified day; cache locally in this class. :param date: date to get data for :type date: datetime.datetime :return: cache data for date :rtype: dict """
if date in self.cache_data:
    logger.debug('Using class-cached data for date %s',
                 date.strftime('%Y-%m-%d'))
    return self.cache_data[date]
logger.debug('Getting data from cache for date %s',
             date.strftime('%Y-%m-%d'))
data = self.cache.get(self.project_name, date)
self.cache_data[date] = data
return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def per_version_data(self): """ Return download data by version. :return: dict of cache data; keys are datetime objects, values are dict of version (str) to count (int) :rtype: dict """
ret = {}
for cache_date in self.cache_dates:
    data = self._cache_get(cache_date)
    if len(data['by_version']) == 0:
        data['by_version'] = {'other': 0}
    ret[cache_date] = data['by_version']
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def per_file_type_data(self): """ Return download data by file type. :return: dict of cache data; keys are datetime objects, values are dict of file type (str) to count (int) :rtype: dict """
ret = {}
for cache_date in self.cache_dates:
    data = self._cache_get(cache_date)
    if len(data['by_file_type']) == 0:
        data['by_file_type'] = {'other': 0}
    ret[cache_date] = data['by_file_type']
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def per_installer_data(self): """ Return download data by installer name and version. :return: dict of cache data; keys are datetime objects, values are dict of installer name/version (str) to count (int). :rtype: dict """
ret = {}
for cache_date in self.cache_dates:
    data = self._cache_get(cache_date)
    ret[cache_date] = {}
    for inst_name, inst_data in data['by_installer'].items():
        for inst_ver, count in inst_data.items():
            k = self._compound_column_value(
                inst_name,
                self._shorten_version(inst_ver)
            )
            ret[cache_date][k] = count
    if len(ret[cache_date]) == 0:
        ret[cache_date]['unknown'] = 0
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def per_implementation_data(self): """ Return download data by Python implementation name and version. :return: dict of cache data; keys are datetime objects, values are dict of implementation name/version (str) to count (int). :rtype: dict """
ret = {}
for cache_date in self.cache_dates:
    data = self._cache_get(cache_date)
    ret[cache_date] = {}
    for impl_name, impl_data in data['by_implementation'].items():
        for impl_ver, count in impl_data.items():
            k = self._compound_column_value(
                impl_name,
                self._shorten_version(impl_ver)
            )
            ret[cache_date][k] = count
    if len(ret[cache_date]) == 0:
        ret[cache_date]['unknown'] = 0
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def per_system_data(self): """ Return download data by system. :return: dict of cache data; keys are datetime objects, values are dict of system (str) to count (int) :rtype: dict """
ret = {}
for cache_date in self.cache_dates:
    data = self._cache_get(cache_date)
    ret[cache_date] = {
        self._column_value(x): data['by_system'][x]
        for x in data['by_system']
    }
    if len(ret[cache_date]) == 0:
        ret[cache_date]['unknown'] = 0
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def per_country_data(self): """ Return download data by country. :return: dict of cache data; keys are datetime objects, values are dict of country (str) to count (int) :rtype: dict """
ret = {}
for cache_date in self.cache_dates:
    data = self._cache_get(cache_date)
    ret[cache_date] = {}
    for cc, count in data['by_country'].items():
        k = '%s (%s)' % (self._alpha2_to_country(cc), cc)
        ret[cache_date][k] = count
    if len(ret[cache_date]) == 0:
        ret[cache_date]['unknown'] = 0
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def per_distro_data(self): """ Return download data by distro name and version. :return: dict of cache data; keys are datetime objects, values are dict of distro name/version (str) to count (int). :rtype: dict """
ret = {}
for cache_date in self.cache_dates:
    data = self._cache_get(cache_date)
    ret[cache_date] = {}
    for distro_name, distro_data in data['by_distro'].items():
        if distro_name.lower() == 'red hat enterprise linux server':
            distro_name = 'RHEL'
        for distro_ver, count in distro_data.items():
            ver = self._shorten_version(distro_ver, num_components=1)
            if distro_name.lower() == 'os x':
                ver = self._shorten_version(distro_ver, num_components=2)
            k = self._compound_column_value(distro_name, ver)
            ret[cache_date][k] = count
    if len(ret[cache_date]) == 0:
        ret[cache_date]['unknown'] = 0
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def downloads_per_day(self): """ Return the number of downloads per day, averaged over the past 7 days of data. :return: average number of downloads per day :rtype: int """
count, num_days = self._downloads_for_num_days(7)
res = ceil(count / num_days)
logger.debug("Downloads per day = (%d / %d) = %d", count, num_days, res)
return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def downloads_per_week(self): """ Return the number of downloads in the last 7 days. :return: number of downloads in the last 7 days; if we have less than 7 days of data, returns None. :rtype: int """
if len(self.cache_dates) < 7:
    logger.error("Only have %d days of data; cannot calculate "
                 "downloads per week", len(self.cache_dates))
    return None
count, _ = self._downloads_for_num_days(7)
logger.debug("Downloads per week = %d", count)
return count
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_project_id(self): """ Get our projectId from the ``GOOGLE_APPLICATION_CREDENTIALS`` creds JSON file. :return: project ID :rtype: str """
fpath = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None)
if fpath is None:
    raise Exception('ERROR: No project ID specified, and '
                    'GOOGLE_APPLICATION_CREDENTIALS env var is not set')
fpath = os.path.abspath(os.path.expanduser(fpath))
logger.debug('Reading credentials file at %s to get project_id', fpath)
with open(fpath, 'r') as fh:
    cred_data = json.loads(fh.read())
return cred_data['project_id']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_bigquery_service(self): """ Connect to the BigQuery service. Calling ``GoogleCredentials.get_application_default`` requires that you either be running in the Google Cloud, or have the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the path to a credentials JSON file. :return: authenticated BigQuery service connection object :rtype: `googleapiclient.discovery.Resource <http://google.github.io/\ google-api-python-client/docs/epy/googleapiclient.discovery.\ Resource-class.html>`_ """
logger.debug('Getting Google Credentials')
credentials = GoogleCredentials.get_application_default()
logger.debug('Building BigQuery service instance')
bigquery_service = build('bigquery', 'v2', credentials=credentials)
return bigquery_service
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_query(self, query): """ Run one query against BigQuery and return the result. :param query: the query to run :type query: str :return: list of per-row response dicts (key => value) :rtype: ``list`` """
query_request = self.service.jobs()
logger.debug('Running query: %s', query)
start = datetime.now()
resp = query_request.query(
    projectId=self.project_id,
    body={'query': query}
).execute()
duration = datetime.now() - start
logger.debug('Query response (in %s): %s', duration, resp)
if not resp['jobComplete']:
    logger.error('Error: query reported job not complete!')
if int(resp['totalRows']) == 0:
    return []
if int(resp['totalRows']) != len(resp['rows']):
    logger.error('Error: query reported %s total rows, but only '
                 'returned %d', resp['totalRows'], len(resp['rows']))
data = []
fields = [f['name'] for f in resp['schema']['fields']]
for row in resp['rows']:
    d = {}
    for idx, val in enumerate(row['f']):
        d[fields[idx]] = val['v']
    data.append(d)
return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_newest_ts_in_table(self, table_name): """ Return the timestamp for the newest record in the given table. :param table_name: name of the table to query :type table_name: str :return: timestamp of newest row in table :rtype: int """
logger.debug(
    'Querying for newest timestamp in table %s', table_name
)
q = "SELECT TIMESTAMP_TO_SEC(MAX(timestamp)) AS max_ts %s;" % (
    self._from_for_table(table_name)
)
res = self._run_query(q)
ts = int(res[0]['max_ts'])
logger.debug('Newest timestamp in table %s: %s', table_name, ts)
return ts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _query_by_installer(self, table_name): """ Query for download data broken down by installer, for one day. :param table_name: table name to query against :type table_name: str :return: dict of download information by installer; keys are project name, values are a dict of installer names to dicts of installer version to download count. :rtype: dict """
logger.info('Querying for downloads by installer in table %s',
            table_name)
q = "SELECT file.project, details.installer.name, " \
    "details.installer.version, COUNT(*) as dl_count " \
    "%s " \
    "%s " \
    "GROUP BY file.project, details.installer.name, " \
    "details.installer.version;" % (
        self._from_for_table(table_name),
        self._where_for_projects
    )
res = self._run_query(q)
result = self._dict_for_projects()
# iterate through results
for row in res:
    # pointer to the per-project result dict
    proj = result[row['file_project']]
    # grab the installer name and version
    iname = row['details_installer_name']
    iver = row['details_installer_version']
    if iname not in proj:
        proj[iname] = {}
    if iver not in proj[iname]:
        proj[iname][iver] = 0
    proj[iname][iver] += int(row['dl_count'])
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _query_by_system(self, table_name): """ Query for download data broken down by system, for one day. :param table_name: table name to query against :type table_name: str :return: dict of download information by system; keys are project name, values are a dict of system names to download count. :rtype: dict """
logger.info('Querying for downloads by system in table %s', table_name)
q = "SELECT file.project, details.system.name, COUNT(*) as dl_count " \
    "%s " \
    "%s " \
    "GROUP BY file.project, details.system.name;" % (
        self._from_for_table(table_name),
        self._where_for_projects
    )
res = self._run_query(q)
result = self._dict_for_projects()
for row in res:
    system = row['details_system_name']
    result[row['file_project']][system] = int(row['dl_count'])
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _query_by_distro(self, table_name): """ Query for download data broken down by OS distribution, for one day. :param table_name: table name to query against :type table_name: str :return: dict of download information by distro; keys are project name, values are a dict of distro names to dicts of distro version to download count. :rtype: dict """
logger.info('Querying for downloads by distro in table %s', table_name)
q = "SELECT file.project, details.distro.name, " \
    "details.distro.version, COUNT(*) as dl_count " \
    "%s " \
    "%s " \
    "GROUP BY file.project, details.distro.name, " \
    "details.distro.version;" % (
        self._from_for_table(table_name),
        self._where_for_projects
    )
res = self._run_query(q)
result = self._dict_for_projects()
# iterate through results
for row in res:
    # pointer to the per-project result dict
    proj = result[row['file_project']]
    # grab the distro name and version
    dname = row['details_distro_name']
    dver = row['details_distro_version']
    if dname not in proj:
        proj[dname] = {}
    if dver not in proj[dname]:
        proj[dname][dver] = 0
    proj[dname][dver] += int(row['dl_count'])
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _have_cache_for_date(self, dt): """ Return True if we have cached data for all projects for the specified datetime. Return False otherwise. :param dt: datetime to find cache for :type dt: datetime.datetime :return: True if we have cache for all projects for this date, False otherwise :rtype: bool """
for p in self.projects:
    if self.cache.get(p, dt) is None:
        return False
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def backfill_history(self, num_days, available_table_names): """ Backfill historical data for days that are missing. :param num_days: number of days of historical data to backfill, if missing :type num_days: int :param available_table_names: names of available per-date tables :type available_table_names: ``list`` """
if num_days == -1:
    # skip the first date, under the assumption that data may be
    # incomplete
    logger.info('Backfilling all available history')
    start_table = available_table_names[1]
else:
    logger.info('Backfilling %d days of history', num_days)
    start_table = available_table_names[-1 * num_days]
start_date = self._datetime_for_table_name(start_table)
end_table = available_table_names[-3]
end_date = self._datetime_for_table_name(end_table)
logger.debug(
    'Backfilling history from %s (%s) to %s (%s)',
    start_table, start_date.strftime('%Y-%m-%d'),
    end_table, end_date.strftime('%Y-%m-%d')
)
for days in range((end_date - start_date).days + 1):
    backfill_dt = start_date + timedelta(days=days)
    if self._have_cache_for_date(backfill_dt):
        logger.info('Cache present for all projects for %s; skipping',
                    backfill_dt.strftime('%Y-%m-%d'))
        continue
    backfill_table = self._table_name_for_datetime(backfill_dt)
    logger.info('Backfilling %s (%s)', backfill_table,
                backfill_dt.strftime('%Y-%m-%d'))
    self.query_one_table(backfill_table)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_queries(self, backfill_num_days=7): """ Run the data queries for the specified projects. :param backfill_num_days: number of days of historical data to backfill, if missing :type backfill_num_days: int """
available_tables = self._get_download_table_ids()
logger.debug('Found %d available download tables: %s',
             len(available_tables), available_tables)
today_table = available_tables[-1]
yesterday_table = available_tables[-2]
self.query_one_table(today_table)
self.query_one_table(yesterday_table)
self.backfill_history(backfill_num_days, available_tables)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_html(self): """ Generate the HTML for the specified graphs. :return: rendered HTML document :rtype: str """
logger.debug('Generating templated HTML')
env = Environment(
    loader=PackageLoader('pypi_download_stats', 'templates'),
    extensions=['jinja2.ext.loopcontrols'])
env.filters['format_date_long'] = filter_format_date_long
env.filters['format_date_ymd'] = filter_format_date_ymd
env.filters['data_columns'] = filter_data_columns
template = env.get_template('base.html')
logger.debug('Rendering template')
html = template.render(
    project=self.project_name,
    cache_date=self._stats.as_of_datetime,
    user=getuser(),
    host=platform_node(),
    version=VERSION,
    proj_url=PROJECT_URL,
    graphs=self._graphs,
    graph_keys=self.GRAPH_KEYS,
    resources=Resources(mode='inline').render(),
    badges=self._badges
)
logger.debug('Template rendered')
return html
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _limit_data(self, data): """ Find the per-day average of each series in the data over the last 7 days; drop all but the top 10. :param data: original graph data :type data: dict :return: dict containing only the top 10 series, based on average over the last 7 days. :rtype: dict """
if len(data.keys()) <= 10:
    logger.debug("Data has 10 or fewer keys; not limiting")
    return data
# average last 7 days of each series
avgs = {}
for k in data:
    if len(data[k]) <= 7:
        vals = data[k]
    else:
        vals = data[k][-7:]
    avgs[k] = sum(vals) / len(vals)
# hold state
final_data = {}  # final data dict
other = []  # values for dropped/'other' series
count = 0  # iteration counter
# iterate the sorted averages; either drop or keep
for k in sorted(avgs, key=avgs.get, reverse=True):
    if count < 10:
        final_data[k] = data[k]
        logger.debug("Keeping data series %s (average over last 7 "
                     "days of data: %d)", k, avgs[k])
    else:
        logger.debug("Adding data series %s to 'other' (average over "
                     "last 7 days of data: %d)", k, avgs[k])
        other.append(data[k])
    count += 1
# sum up the other data and add to final
final_data['other'] = [sum(series) for series in zip(*other)]
return final_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_graph(self, name, title, stats_data, y_name): """ Generate a downloads graph; append it to ``self._graphs``. :param name: HTML name of the graph, also used in ``self.GRAPH_KEYS`` :type name: str :param title: human-readable title for the graph :type title: str :param stats_data: data dict from ``self._stats`` :type stats_data: dict :param y_name: Y axis metric name :type y_name: str """
logger.debug('Generating chart data for %s graph', name)
orig_data, labels = self._data_dict_to_bokeh_chart_data(stats_data)
data = self._limit_data(orig_data)
logger.debug('Generating %s graph', name)
script, div = FancyAreaGraph(
    name, '%s %s' % (self.project_name, title), data, labels,
    y_name).generate_graph()
logger.debug('%s graph generated', name)
self._graphs[name] = {
    'title': title,
    'script': script,
    'div': div,
    'raw_data': stats_data
}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_badges(self): """ Generate download badges. Append them to ``self._badges``. """
daycount = self._stats.downloads_per_day
day = self._generate_badge('Downloads', '%d/day' % daycount)
self._badges['per-day'] = day
weekcount = self._stats.downloads_per_week
if weekcount is None:
    # we don't have enough data for week (or month)
    return
week = self._generate_badge('Downloads', '%d/week' % weekcount)
self._badges['per-week'] = week
monthcount = self._stats.downloads_per_month
if monthcount is None:
    # we don't have enough data for month
    return
month = self._generate_badge('Downloads', '%d/month' % monthcount)
self._badges['per-month'] = month
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_badge(self, subject, status): """ Generate SVG for one badge via shields.io. :param subject: subject; left-hand side of badge :type subject: str :param status: status; right-hand side of badge :type status: str :return: badge SVG :rtype: str """
url = 'https://img.shields.io/badge/%s-%s-brightgreen.svg' \
      '?style=flat&maxAge=3600' % (subject, status)
logger.debug("Getting badge for %s => %s (%s)", subject, status, url)
res = requests.get(url)
if res.status_code != 200:
    # Exception does not do %-style interpolation, so format the
    # message before raising
    raise Exception("Error: got status %s for shields.io badge: %s"
                    % (res.status_code, res.text))
logger.debug('Got %d character response from shields.io', len(res.text))
return res.text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate(self): """ Generate all output types and write to disk. """
logger.info('Generating graphs')
self._generate_graph(
    'by-version',
    'Downloads by Version',
    self._stats.per_version_data,
    'Version'
)
self._generate_graph(
    'by-file-type',
    'Downloads by File Type',
    self._stats.per_file_type_data,
    'File Type'
)
self._generate_graph(
    'by-installer',
    'Downloads by Installer',
    self._stats.per_installer_data,
    'Installer'
)
self._generate_graph(
    'by-implementation',
    'Downloads by Python Implementation/Version',
    self._stats.per_implementation_data,
    'Implementation/Version'
)
self._generate_graph(
    'by-system',
    'Downloads by System Type',
    self._stats.per_system_data,
    'System'
)
self._generate_graph(
    'by-country',
    'Downloads by Country',
    self._stats.per_country_data,
    'Country'
)
self._generate_graph(
    'by-distro',
    'Downloads by Distro',
    self._stats.per_distro_data,
    'Distro'
)
self._generate_badges()
logger.info('Generating HTML')
html = self._generate_html()
html_path = os.path.join(self.output_dir, 'index.html')
with open(html_path, 'wb') as fh:
    fh.write(html.encode('utf-8'))
logger.info('HTML report written to %s', html_path)
logger.info('Writing SVG badges')
for name, svg in self._badges.items():
    path = os.path.join(self.output_dir, '%s.svg' % name)
    with open(path, 'w') as fh:
        fh.write(svg)
    logger.info('%s badge written to: %s', name, path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def datetime_from_iso(iso_string): """ Create a DateTime object from an ISO string .. code:: python reusables.datetime_from_iso('2017-03-10T12:56:55.031863') datetime.datetime(2017, 3, 10, 12, 56, 55, 31863) :param iso_string: string of an ISO datetime :return: DateTime object """
try:
    assert datetime_regex.datetime.datetime.match(iso_string).groups()[0]
except (ValueError, AssertionError, IndexError, AttributeError):
    raise TypeError("String is not in ISO format")
try:
    return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
    return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def now(utc=False, tz=None): """ Get a current DateTime object. By default it is local. .. code:: python reusables.now() # DateTime(2016, 12, 8, 22, 5, 2, 517000) reusables.now().format("It's {24-hour}:{min}") # "It's 22:05" :param utc: bool, default False, UTC time not local :param tz: TimeZone as specified by the datetime module :return: reusables.DateTime """
return datetime.datetime.utcnow() if utc else datetime.datetime.now(tz=tz)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(command, input=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=None, copy_local_env=False, **kwargs): """ Cross platform compatible subprocess with CompletedProcess return. No formatting or encoding is performed on the output of subprocess, so its output will appear the same on each version / interpreter as before. .. code:: python reusables.run('echo "hello world!', shell=True) # CPython 3.6 # CompletedProcess(args='echo "hello world!', returncode=0, # stdout=b'"hello world!\\r\\n', stderr=b'') # # PyPy 5.4 (Python 2.7.10) # CompletedProcess(args='echo "hello world!', returncode=0L, # stdout='"hello world!\\r\\n') Timeout is only usable in Python 3.X, as it was not implemented before then; a NotImplementedError will be raised if it is specified on a 2.x version of Python. :param command: command to run, str if shell=True otherwise must be list :param input: send something `communicate` :param stdout: PIPE or None :param stderr: PIPE or None :param timeout: max time to wait for command to complete :param copy_local_env: Use all current ENV vars in the subprocess as well :param kwargs: additional arguments to pass to Popen :return: CompletedProcess class """
if copy_local_env: # Copy local env first and overwrite with anything manually specified env = os.environ.copy() env.update(kwargs.get('env', {})) else: env = kwargs.get('env') if sys.version_info >= (3, 5): return subprocess.run(command, input=input, stdout=stdout, stderr=stderr, timeout=timeout, env=env, **kwargs) # Created here instead of root level as it should never need to be # manually created or referenced class CompletedProcess(object): """A backwards compatible near clone of subprocess.CompletedProcess""" def __init__(self, args, returncode, stdout=None, stderr=None): self.args = args self.returncode = returncode self.stdout = stdout self.stderr = stderr def __repr__(self): args = ['args={0!r}'.format(self.args), 'returncode={0!r}'.format(self.returncode), 'stdout={0!r}'.format(self.stdout) if self.stdout else '', 'stderr={0!r}'.format(self.stderr) if self.stderr else ''] return "{0}({1})".format(type(self).__name__, ', '.join(filter(None, args))) def check_returncode(self): if self.returncode: if python_version < (2, 7): raise subprocess.CalledProcessError(self.returncode, self.args) raise subprocess.CalledProcessError(self.returncode, self.args, self.stdout) proc = subprocess.Popen(command, stdout=stdout, stderr=stderr, env=env, **kwargs) if PY3: out, err = proc.communicate(input=input, timeout=timeout) else: if timeout: raise NotImplementedError("Timeout is only available on Python 3") out, err = proc.communicate(input=input) return CompletedProcess(command, proc.returncode, out, err)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_in_pool(target, iterable, threaded=True, processes=4,
                asynchronous=False, target_kwargs=None):
    """ Run an iterable of arguments through a function in a Threaded or MP Pool.

    .. code:: python

        def func(a):
            return a + a

        reusables.run_in_pool(func, [1, 2, 3, 4, 5])
        # [2, 4, 6, 8, 10]

    :param target: function to run
    :param iterable: positional arg to pass to function
    :param threaded: Threaded if True multiprocessed if False
    :param processes: Number of workers
    :param asynchronous: will do map_async if True
    :param target_kwargs: Keyword arguments to set on the function as a partial
    :return: pool results
    """
    my_pool = pool.ThreadPool if threaded else pool.Pool

    if target_kwargs:
        target = partial(target, **target_kwargs)

    p = my_pool(processes)

    try:
        results = (p.map_async(target, iterable) if asynchronous
                   else p.map(target, iterable))
    finally:
        p.close()
        p.join()

    return results
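A brief usage sketch (the worker functions and inputs are made up for illustration; note that with threaded=False the target must be a picklable, module-level function):

import reusables

def double(x):
    return x + x

# Five inputs spread across two worker threads
reusables.run_in_pool(double, [1, 2, 3, 4, 5], processes=2)
# [2, 4, 6, 8, 10]

def scale(x, factor=1):
    return x * factor

# target_kwargs is bound onto the function with functools.partial
reusables.run_in_pool(scale, [1, 2, 3], target_kwargs={"factor": 10})
# [10, 20, 30]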
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tree_view(dictionary, level=0, sep="| "):
    """
    Render a dictionary's keys as an indented tree and return it as a
    string; nested dictionaries become deeper branches.
    """
return "".join(["{0}{1}\n{2}".format(sep * level, k, tree_view(v, level + 1, sep=sep) if isinstance(v, dict) else "") for k, v in dictionary.items()])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self, in_dict=None): """ Turn the Namespace and sub Namespaces back into a native python dictionary. :param in_dict: Do not use, for self recursion :return: python dictionary of this Namespace """
in_dict = in_dict if in_dict else self out_dict = dict() for k, v in in_dict.items(): if isinstance(v, Namespace): v = v.to_dict() out_dict[k] = v return out_dict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(self, item, default=None, spliter=",", strip=True, mod=None):
    """ Return value of key as a list

    :param item: key of value to transform
    :param default: value to return if item does not exist
    :param spliter: character to split str on
    :param strip: strip surrounding brackets and whitespace from each item
    :param mod: function to map against the resulting list
    :return: list of items
    """
try: item = self.__getattr__(item) except AttributeError as err: if default is not None: return default raise err if strip: item = item.lstrip("[").rstrip("]") out = [x.strip() if strip else x for x in item.split(spliter)] if mod: return list(map(mod, out)) return out
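A sketch of how this might read a comma separated config value (the file, section, and key names are invented, and it is assumed that nested config sections expose this same method):

# settings.ini contains, under [general]:
#   ports = 8080, 8081, 8082
cfg = reusables.config_namespace("settings.ini")
cfg.general.list("ports", mod=int)
# [8080, 8081, 8082]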
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download(url, save_to_file=True, save_dir=".", filename=None, block_size=64000, overwrite=False, quiet=False): """ Download a given URL to either file or memory :param url: Full url (with protocol) of path to download :param save_to_file: boolean if it should be saved to file or not :param save_dir: location of saved file, default is current working dir :param filename: filename to save as :param block_size: download chunk size :param overwrite: overwrite file if it already exists :param quiet: boolean to turn off logging for function :return: save location (or content if not saved to file) """
if save_to_file: if not filename: filename = safe_filename(url.split('/')[-1]) if not filename: filename = "downloaded_at_{}.file".format(time.time()) save_location = os.path.abspath(os.path.join(save_dir, filename)) if os.path.exists(save_location) and not overwrite: logger.error("File {0} already exists".format(save_location)) return False else: save_location = "memory" try: request = urlopen(url) except ValueError as err: if not quiet and "unknown url type" in str(err): logger.error("Please make sure URL is formatted correctly and" " starts with http:// or other protocol") raise err except Exception as err: if not quiet: logger.error("Could not download {0} - {1}".format(url, err)) raise err try: kb_size = int(request.headers["Content-Length"]) / 1024 except Exception as err: if not quiet: logger.debug("Could not determine file size - {0}".format(err)) file_size = "(unknown size)" else: file_size = "({0:.1f} {1})".format(*(kb_size, "KB") if kb_size < 9999 else (kb_size / 1024, "MB")) if not quiet: logger.info("Downloading {0} {1} to {2}".format(url, file_size, save_location)) if save_to_file: with open(save_location, "wb") as f: while True: buffer = request.read(block_size) if not buffer: break f.write(buffer) return save_location else: return request.read()
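Typical calls might look like this (the URL and directory are placeholders; the target directory is assumed to already exist):

# Save under the file name taken from the URL
reusables.download("http://example.com/pdfs/guide.pdf", save_dir="downloads")
# '/abs/path/downloads/guide.pdf'

# Or keep the raw bytes in memory instead of writing a file
data = reusables.download("http://example.com/pdfs/guide.pdf",
                          save_to_file=False)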
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def url_to_ips(url, port=None, ipv6=False, connect_type=socket.SOCK_STREAM, proto=socket.IPPROTO_TCP, flags=0): """ Provide a list of IP addresses, uses `socket.getaddrinfo` .. code:: python reusables.url_to_ips("example.com", ipv6=True) # ['2606:2800:220:1:248:1893:25c8:1946'] :param url: hostname to resolve to IP addresses :param port: port to send to getaddrinfo :param ipv6: Return IPv6 address if True, otherwise IPv4 :param connect_type: defaults to STREAM connection, can be 0 for all :param proto: defaults to TCP, can be 0 for all :param flags: additional flags to pass :return: list of resolved IPs """
try: results = socket.getaddrinfo(url, port, (socket.AF_INET if not ipv6 else socket.AF_INET6), connect_type, proto, flags) except socket.gaierror: logger.exception("Could not resolve hostname") return [] return list(set([result[-1][0] for result in results]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ip_to_url(ip_addr): """ Resolve a hostname based off an IP address. This is very limited and will probably not return any results if it is a shared IP address or an address with improperly setup DNS records. .. code:: python reusables.ip_to_url('93.184.216.34') # example.com # None reusables.ip_to_url('8.8.8.8') # 'google-public-dns-a.google.com' :param ip_addr: IP address to resolve to hostname :return: string of hostname or None """
try: return socket.gethostbyaddr(ip_addr)[0] except (socket.gaierror, socket.herror): logger.exception("Could not resolve hostname")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """Create a background thread for httpd and serve 'forever'"""
self._process = threading.Thread(target=self._background_runner) self._process.start()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_stream_handler(stream=sys.stderr, level=logging.INFO, log_format=log_formats.easy_read): """ Returns a set up stream handler to add to a logger. :param stream: which stream to use, defaults to sys.stderr :param level: logging level to set handler at :param log_format: formatter to use :return: stream handler """
sh = logging.StreamHandler(stream) sh.setLevel(level) sh.setFormatter(logging.Formatter(log_format)) return sh
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_file_handler(file_path="out.log", level=logging.INFO, log_format=log_formats.easy_read, handler=logging.FileHandler, **handler_kwargs): """ Set up a file handler to add to a logger. :param file_path: file to write the log to, defaults to out.log :param level: logging level to set handler at :param log_format: formatter to use :param handler: logging handler to use, defaults to FileHandler :param handler_kwargs: options to pass to the handler :return: handler """
fh = handler(file_path, **handler_kwargs) fh.setLevel(level) fh.setFormatter(logging.Formatter(log_format)) return fh
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup_logger(module_name=None, level=logging.INFO, stream=sys.stderr, file_path=None, log_format=log_formats.easy_read, suppress_warning=True): """ Grabs the specified logger and adds wanted handlers to it. Will default to adding a stream handler. :param module_name: logger name to use :param level: logging level to set logger at :param stream: stream to log to, or None :param file_path: file path to log to, or None :param log_format: format to set the handlers to use :param suppress_warning: add a NullHandler if no other handler is specified :return: configured logger """
new_logger = logging.getLogger(module_name) if stream: new_logger.addHandler(get_stream_handler(stream, level, log_format)) elif not file_path and suppress_warning and not new_logger.handlers: new_logger.addHandler(logging.NullHandler()) if file_path: new_logger.addHandler(get_file_handler(file_path, level, log_format)) if level > 0: new_logger.setLevel(level) return new_logger
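A minimal setup sketch combining the stream and file handlers above (the logger name and file path are arbitrary):

import logging
import reusables

log = reusables.setup_logger("my_app", level=logging.DEBUG,
                             file_path="my_app.log")
log.info("Goes to stderr and my_app.log")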
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_stream_handler(logger=None, stream=sys.stderr, level=logging.INFO,
                       log_format=log_formats.easy_read):
    """
    Adds a newly created stream handler to the specified logger

    :param logger: logging name or object to modify, defaults to root logger
    :param stream: which stream to use, defaults to sys.stderr
    :param level: logging level to set handler at
    :param log_format: formatter to use
    """
if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) logger.addHandler(get_stream_handler(stream, level, log_format))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_file_handler(logger=None, file_path="out.log", level=logging.INFO,
                     log_format=log_formats.easy_read):
    """
    Adds a newly created file handler to the specified logger

    :param logger: logging name or object to modify, defaults to root logger
    :param file_path: path to file to log to
    :param level: logging level to set handler at
    :param log_format: formatter to use
    """
if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) logger.addHandler(get_file_handler(file_path, level, log_format))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_rotating_file_handler(logger=None, file_path="out.log", level=logging.INFO, log_format=log_formats.easy_read, max_bytes=10*sizes.mb, backup_count=5, **handler_kwargs): """ Adds a rotating file handler to the specified logger. :param logger: logging name or object to modify, defaults to root logger :param file_path: path to file to log to :param level: logging level to set handler at :param log_format: log formatter :param max_bytes: Max file size in bytes before rotating :param backup_count: Number of backup files :param handler_kwargs: options to pass to the handler """
if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) logger.addHandler(get_file_handler(file_path, level, log_format, handler=RotatingFileHandler, maxBytes=max_bytes, backupCount=backup_count, **handler_kwargs))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_timed_rotating_file_handler(logger=None, file_path="out.log",
                                    level=logging.INFO,
                                    log_format=log_formats.easy_read,
                                    when='w0', interval=1, backup_count=5,
                                    **handler_kwargs):
    """
    Adds a timed rotating file handler to the specified logger.
    Defaults to weekly rotation, with 5 backups.

    :param logger: logging name or object to modify, defaults to root logger
    :param file_path: path to file to log to
    :param level: logging level to set handler at
    :param log_format: log formatter
    :param when: interval type to rotate on, as accepted by
        TimedRotatingFileHandler, e.g. 's', 'h', 'midnight' or 'w0'-'w6'
    :param interval: how many units of `when` between rotations
    :param backup_count: Number of backup files
    :param handler_kwargs: options to pass to the handler
    """
if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) logger.addHandler(get_file_handler(file_path, level, log_format, handler=TimedRotatingFileHandler, when=when, interval=interval, backupCount=backup_count, **handler_kwargs))
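For example, rotating every night at midnight and keeping a week of logs (the logger name and file path are placeholders):

reusables.add_timed_rotating_file_handler("my_app", "my_app.log",
                                          when="midnight", interval=1,
                                          backup_count=7)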
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_stream_handlers(logger=None): """ Remove only stream handlers from the specified logger :param logger: logging name or object to modify, defaults to root logger """
if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) new_handlers = [] for handler in logger.handlers: # FileHandler is a subclass of StreamHandler so # 'if not a StreamHandler' does not work if (isinstance(handler, logging.FileHandler) or isinstance(handler, logging.NullHandler) or (isinstance(handler, logging.Handler) and not isinstance(handler, logging.StreamHandler))): new_handlers.append(handler) logger.handlers = new_handlers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_file_handlers(logger=None): """ Remove only file handlers from the specified logger. Will go through and close each handler for safety. :param logger: logging name or object to modify, defaults to root logger """
if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) new_handlers = [] for handler in logger.handlers: if isinstance(handler, logging.FileHandler): handler.close() else: new_handlers.append(handler) logger.handlers = new_handlers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_all_handlers(logger=None): """ Safely remove all handlers from the logger :param logger: logging name or object to modify, defaults to root logger """
if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) remove_file_handlers(logger) logger.handlers = []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def change_logger_levels(logger=None, level=logging.DEBUG): """ Go through the logger and handlers and update their levels to the one specified. :param logger: logging name or object to modify, defaults to root logger :param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error) """
    if not isinstance(logger, logging.Logger):
        logger = logging.getLogger(logger)
    logger.setLevel(level)
    for handler in logger.handlers:
        handler.setLevel(level)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_registered_loggers(hide_children=False, hide_reusables=False): """ Find the names of all loggers currently registered :param hide_children: only return top level logger names :param hide_reusables: hide the reusables loggers :return: list of logger names """
return [logger for logger in logging.Logger.manager.loggerDict.keys() if not (hide_reusables and "reusables" in logger) and not (hide_children and "." in logger)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unique(max_retries=10, wait=0, alt_return="-no_alt_return-",
           exception=Exception, error_text=None):
    """
    Wrapper. Makes sure the function's return value has not been returned
    before, or else it is run again with the same inputs.

    .. code:: python

        import reusables
        import random

        @reusables.unique(max_retries=100)
        def poor_uuid():
            return random.randint(0, 10)

        print([poor_uuid() for _ in range(10)])
        # [8, 9, 6, 3, 0, 7, 2, 5, 4, 10]

        print([poor_uuid() for _ in range(100)])
        # Exception: No result was unique

    Message format options: {func} {args} {kwargs}

    :param max_retries: int of number of retries to attempt before failing
    :param wait: float of seconds to wait between each try, defaults to 0
    :param exception: Exception type to raise
    :param error_text: text of the exception
    :param alt_return: if specified, an exception is not raised on failure,
        instead the provided value of any type will be returned
    """
def func_wrap(func): @wraps(func) def wrapper(*args, **kwargs): msg = (error_text if error_text else "No result was unique for function '{func}'") if not error_text: msg = _add_args(msg, *args, **kwargs) for i in range(max_retries): value = func(*args, **kwargs) if value not in unique_cache[func.__name__]: unique_cache[func.__name__].append(value) return value if wait: time.sleep(wait) else: if alt_return != "-no_alt_return-": return alt_return raise exception(msg.format(func=func.__name__, args=args, kwargs=kwargs)) return wrapper return func_wrap
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lock_it(lock=g_lock):
    """
    Wrapper. Simple wrapper to make sure a function is only run once at a time.

    .. code:: python

        import reusables
        import time

        def func_one(_):
            time.sleep(5)

        @reusables.lock_it()
        def func_two(_):
            time.sleep(5)

        @reusables.time_it(message="test_1 took {0:.2f} seconds")
        def test_1():
            reusables.run_in_pool(func_one, (1, 2, 3), threaded=True)

        @reusables.time_it(message="test_2 took {0:.2f} seconds")
        def test_2():
            reusables.run_in_pool(func_two, (1, 2, 3), threaded=True)

        test_1()
        test_2()

        # test_1 took 5.04 seconds
        # test_2 took 15.07 seconds

    :param lock: Which lock to use, uses unique default
    """
def func_wrapper(func): @wraps(func) def wrapper(*args, **kwargs): with lock: return func(*args, **kwargs) return wrapper return func_wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def time_it(log=None, message=None, append=None):
    """
    Wrapper. Time how long the function takes to execute, and print or log
    the total. If log is true, make sure to set the logging level of
    'reusables' to INFO level or lower.

    .. code:: python

        import time
        import reusables

        reusables.add_stream_handler('reusables')

        @reusables.time_it(log=True, message="{seconds:.2f} seconds")
        def test_time(length):
            time.sleep(length)
            return "slept {0}".format(length)

        result = test_time(5)
        # 2016-11-09 16:59:39,935 - reusables.wrappers  INFO 5.01 seconds

        print(result)
        # slept 5

    Message format options: {func} {seconds} {args} {kwargs}

    :param log: log as INFO level instead of printing
    :param message: string to format with total time as the only input
    :param append: list to append the total time to
    """
def func_wrapper(func): @wraps(func) def wrapper(*args, **kwargs): # Can't use nonlocal in 2.x msg = (message if message else "Function '{func}' took a total of {seconds} seconds") if not message: msg = _add_args(msg, *args, **kwargs) time_func = (time.perf_counter if python_version >= (3, 3) else time.time) start_time = time_func() try: return func(*args, **kwargs) finally: total_time = time_func() - start_time time_string = msg.format(func=func.__name__, seconds=total_time, args=args, kwargs=kwargs) if log: my_logger = logging.getLogger(log) if isinstance(log, str)\ else logger my_logger.info(time_string) else: print(time_string) if isinstance(append, list): append.append(total_time) return wrapper return func_wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def queue_it(queue=g_queue, **put_args):
    """
    Wrapper. Instead of returning the result of the function, add it to a queue.

    .. code:: python

        import reusables
        import queue

        my_queue = queue.Queue()

        @reusables.queue_it(my_queue)
        def func(a):
            return a

        func(10)

        print(my_queue.get())
        # 10

    :param queue: Queue to add result into
    """
def func_wrapper(func): @wraps(func) def wrapper(*args, **kwargs): queue.put(func(*args, **kwargs), **put_args) return wrapper return func_wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_exception(log="reusables", message=None, exceptions=(Exception, ),
                  level=logging.ERROR, show_traceback=True):
    """
    Wrapper. Log the traceback to any exceptions raised. Possible to raise
    custom exception.

    .. code:: python

        @reusables.log_exception()
        def test():
            raise Exception("Bad")

        # 2016-12-26 12:38:01,381 - reusables   ERROR  Exception in test - Bad
        # Traceback (most recent call last):
        #     File "<input>", line 1, in <module>
        #     File "reusables\wrappers.py", line 200, in wrapper
        #         raise err
        # Exception: Bad

    Message format options: {func} {err} {args} {kwargs}

    :param exceptions: types of exceptions to catch
    :param log: log name to use
    :param message: message to use in log
    :param level: logging level
    :param show_traceback: include full traceback or just error message
    """
def func_wrapper(func): @wraps(func) def wrapper(*args, **kwargs): msg = message if message else "Exception in '{func}': {err}" if not message: msg = _add_args(msg, *args, **kwargs) try: return func(*args, **kwargs) except exceptions as err: my_logger = (logging.getLogger(log) if isinstance(log, str) else log) my_logger.log(level, msg.format(func=func.__name__, err=str(err), args=args, kwargs=kwargs), exc_info=show_traceback) raise err return wrapper return func_wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def catch_it(exceptions=(Exception, ), default=None, handler=None):
    """
    If the function encounters an exception, catch it, and
    return the specified default or send it to a handler function instead.

    .. code:: python

        def handle_error(exception, func, *args, **kwargs):
            print(f"{func.__name__} raised {exception} when called with {args}")

        @reusables.catch_it(handler=handle_error)
        def will_raise(message="Hello"):
            raise Exception(message)

    :param exceptions: tuple of exceptions to catch
    :param default: what to return if the exception is caught
    :param handler: function to send exception, func, *args and **kwargs
    """
def func_wrapper(func): @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except exceptions as err: if handler: return handler(err, func, *args, **kwargs) return default return wrapper return func_wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retry_it(exceptions=(Exception, ), tries=10, wait=0, handler=None,
             raised_exception=ReusablesError, raised_message=None):
    """
    Retry a function if an exception is raised, or if `handler` returns
    False for the function's output.

    Message format options: {func} {args} {kwargs}

    :param exceptions: tuple of exceptions to catch
    :param tries: number of tries to retry the function
    :param wait: time to wait between executions in seconds
    :param handler: function to check if output is valid, must return bool
    :param raised_exception: default is ReusablesError
    :param raised_message: message to pass to raised exception
    """
    def func_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            msg = (raised_message if raised_message
                   else "Max retries exceeded for function '{func}'")
            if not raised_message:
                msg = _add_args(msg, *args, **kwargs)

            def retry_or_raise():
                # Retry with one fewer try remaining, passing every option
                # through so the chain keeps its configured exception and
                # message, and raise on exhaustion
                if tries:
                    if wait:
                        time.sleep(wait)
                    return retry_it(exceptions=exceptions, tries=tries - 1,
                                    wait=wait, handler=handler,
                                    raised_exception=raised_exception,
                                    raised_message=raised_message
                                    )(func)(*args, **kwargs)
                if raised_exception:
                    exc = raised_exception(msg.format(func=func.__name__,
                                                      args=args,
                                                      kwargs=kwargs))
                    exc.__cause__ = None
                    raise exc

            try:
                result = func(*args, **kwargs)
            except exceptions:
                return retry_or_raise()
            if handler and not handler(result):
                return retry_or_raise()
            return result
        return wrapper
    return func_wrapper
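Two usage sketches (the decorated functions are hypothetical):

# Retry up to 3 times on a specific exception, waiting a second between tries
@reusables.retry_it(exceptions=(ConnectionError,), tries=3, wait=1)
def fetch_status():
    ...

# Retry based on the return value: keep trying while the handler returns False
@reusables.retry_it(handler=lambda result: result is not None, tries=5)
def poll_for_result():
    ...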
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract(archive_file, path=".", delete_on_success=False, enable_rar=False): """ Automatically detect archive type and extract all files to specified path. .. code:: python import os os.listdir(".") # ['test_structure.zip'] reusables.extract("test_structure.zip") os.listdir(".") # [ 'test_structure', 'test_structure.zip'] :param archive_file: path to file to extract :param path: location to extract to :param delete_on_success: Will delete the original archive if set to True :param enable_rar: include the rarfile import and extract :return: path to extracted files """
if not os.path.exists(archive_file) or not os.path.getsize(archive_file): logger.error("File {0} unextractable".format(archive_file)) raise OSError("File does not exist or has zero size") arch = None if zipfile.is_zipfile(archive_file): logger.debug("File {0} detected as a zip file".format(archive_file)) arch = zipfile.ZipFile(archive_file) elif tarfile.is_tarfile(archive_file): logger.debug("File {0} detected as a tar file".format(archive_file)) arch = tarfile.open(archive_file) elif enable_rar: import rarfile if rarfile.is_rarfile(archive_file): logger.debug("File {0} detected as " "a rar file".format(archive_file)) arch = rarfile.RarFile(archive_file) if not arch: raise TypeError("File is not a known archive") logger.debug("Extracting files to {0}".format(path)) try: arch.extractall(path=path) finally: arch.close() if delete_on_success: logger.debug("Archive {0} will now be deleted".format(archive_file)) os.unlink(archive_file) return os.path.abspath(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_json(json_file, **kwargs): """ Open and load data from a JSON file .. code:: python reusables.load_json("example.json") # {u'key_1': u'val_1', u'key_for_dict': {u'sub_dict_key': 8}} :param json_file: Path to JSON file as string :param kwargs: Additional arguments for the json.load command :return: Dictionary """
with open(json_file) as f: return json.load(f, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_json(data, json_file, indent=4, **kwargs): """ Takes a dictionary and saves it to a file as JSON .. code:: python my_dict = {"key_1": "val_1", "key_for_dict": {"sub_dict_key": 8}} reusables.save_json(my_dict,"example.json") example.json .. code:: { "key_1": "val_1", "key_for_dict": { "sub_dict_key": 8 } } :param data: dictionary to save as JSON :param json_file: Path to save file location as str :param indent: Format the JSON file with so many numbers of spaces :param kwargs: Additional arguments for the json.dump command """
with open(json_file, "w") as f: json.dump(data, f, indent=indent, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config_namespace(config_file=None, auto_find=False, verify=True, **cfg_options): """ Return configuration options as a Namespace. .. code:: python reusables.config_namespace(os.path.join("test", "data", "test_config.ini")) :param config_file: path or paths to the files location :param auto_find: look for a config type file at this location or below :param verify: make sure the file exists before trying to read :param cfg_options: options to pass to the parser :return: Namespace of the config files """
return ConfigNamespace(**config_dict(config_file, auto_find, verify, **cfg_options))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _walk(directory, enable_scandir=False, **kwargs): """ Internal function to return walk generator either from os or scandir :param directory: directory to traverse :param enable_scandir: on python < 3.5 enable external scandir package :param kwargs: arguments to pass to walk function :return: walk generator """
walk = os.walk if python_version < (3, 5) and enable_scandir: import scandir walk = scandir.walk return walk(directory, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def os_tree(directory, enable_scandir=False): """ Return a directories contents as a dictionary hierarchy. .. code:: python reusables.os_tree(".") # {'doc': {'build': {'doctrees': {}, # 'html': {'_sources': {}, '_static': {}}}, # 'source': {}}, # 'reusables': {'__pycache__': {}}, # 'test': {'__pycache__': {}, 'data': {}}} :param directory: path to directory to created the tree of. :param enable_scandir: on python < 3.5 enable external scandir package :return: dictionary of the directory """
    if not os.path.exists(directory):
        raise OSError("Directory does not exist")
    if not os.path.isdir(directory):
        raise OSError("Path is not a directory")

    full_list = []
    for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
        # Store each directory path relative to the base directory;
        # str.lstrip(directory) would strip characters, not the prefix
        full_list.extend([os.path.relpath(os.path.join(root, d), directory)
                          + os.sep for d in dirs])
    tree = {os.path.basename(directory): {}}
    for item in full_list:
        separated = item.split(os.sep)
        is_dir = separated[-1:] == ['']
        if is_dir:
            separated = separated[:-1]
        parent = tree[os.path.basename(directory)]
        for index, path in enumerate(separated):
            if path in parent:
                parent = parent[path]
                continue
            else:
                parent[path] = dict()
                parent = parent[path]
    return tree
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def file_hash(path, hash_type="md5", block_size=65536, hex_digest=True):
    """
    Hash a given file with md5, or any other hash, and return the hex digest.
    You can run `hashlib.algorithms_available` to see which are available on
    your system (unless you have an archaic python version, you poor soul).

    This function is designed to not be memory intensive.

    .. code:: python

        reusables.file_hash("test_structure.zip")
        # '61e387de305201a2c915a4f4277d6663'

    :param path: location of the file to hash
    :param hash_type: string name of the hash to use
    :param block_size: amount of bytes to add to hasher at a time
    :param hex_digest: returned as hexdigest, false will return digest
    :return: file's hash
    """
hashed = hashlib.new(hash_type) with open(path, "rb") as infile: buf = infile.read(block_size) while len(buf) > 0: hashed.update(buf) buf = infile.read(block_size) return hashed.hexdigest() if hex_digest else hashed.digest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_files(directory=".", ext=None, name=None, match_case=False, disable_glob=False, depth=None, abspath=False, enable_scandir=False): """ Walk through a file directory and return an iterator of files that match requirements. Will autodetect if name has glob as magic characters. Note: For the example below, you can use find_files_list to return as a list, this is simply an easy way to show the output. .. code:: python list(reusables.find_files(name="ex", match_case=True)) # ['C:\\example.pdf', # 'C:\\My_exam_score.txt'] list(reusables.find_files(name="*free*")) # ['C:\\my_stuff\\Freedom_fight.pdf'] list(reusables.find_files(ext=".pdf")) # ['C:\\Example.pdf', # 'C:\\how_to_program.pdf', # 'C:\\Hunks_and_Chicks.pdf'] list(reusables.find_files(name="*chris*")) # ['C:\\Christmas_card.docx', # 'C:\\chris_stuff.zip'] :param directory: Top location to recursively search for matching files :param ext: Extensions of the file you are looking for :param name: Part of the file name :param match_case: If name or ext has to be a direct match or not :param disable_glob: Do not look for globable names or use glob magic check :param depth: How many directories down to search :param abspath: Return files with their absolute paths :param enable_scandir: on python < 3.5 enable external scandir package :return: generator of all files in the specified directory """
if ext or not name: disable_glob = True if not disable_glob: disable_glob = not glob.has_magic(name) if ext and isinstance(ext, str): ext = [ext] elif ext and not isinstance(ext, (list, tuple)): raise TypeError("extension must be either one extension or a list") if abspath: directory = os.path.abspath(directory) starting_depth = directory.count(os.sep) for root, dirs, files in _walk(directory, enable_scandir=enable_scandir): if depth and root.count(os.sep) - starting_depth >= depth: continue if not disable_glob: if match_case: raise ValueError("Cannot use glob and match case, please " "either disable glob or not set match_case") glob_generator = glob.iglob(os.path.join(root, name)) for item in glob_generator: yield item continue for file_name in files: if ext: for end in ext: if file_name.lower().endswith(end.lower() if not match_case else end): break else: continue if name: if match_case and name not in file_name: continue elif name.lower() not in file_name.lower(): continue yield os.path.join(root, file_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_empty_directories(root_directory, dry_run=False,
                             ignore_errors=True, enable_scandir=False):
    """
    Remove all empty folders from a path. Returns list of empty directories.

    :param root_directory: base directory to start at
    :param dry_run: just return a list of what would be removed
    :param ignore_errors: permissions are a pain; ignore errors if blocked
    :param enable_scandir: on python < 3.5 enable external scandir package
    :return: list of removed directories
    """
listdir = os.listdir if python_version < (3, 5) and enable_scandir: import scandir as _scandir def listdir(directory): return list(_scandir.scandir(directory)) directory_list = [] for root, directories, files in _walk(root_directory, enable_scandir=enable_scandir, topdown=False): if (not directories and not files and os.path.exists(root) and root != root_directory and os.path.isdir(root)): directory_list.append(root) if not dry_run: try: os.rmdir(root) except OSError as err: if ignore_errors: logger.info("{0} could not be deleted".format(root)) else: raise err elif directories and not files: for directory in directories: directory = join_paths(root, directory, strict=True) if (os.path.exists(directory) and os.path.isdir(directory) and not listdir(directory)): directory_list.append(directory) if not dry_run: try: os.rmdir(directory) except OSError as err: if ignore_errors: logger.info("{0} could not be deleted".format( directory)) else: raise err return directory_list
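Because deletion is destructive, a dry run first is a reasonable pattern (the path and output are placeholders):

would_remove = reusables.remove_empty_directories("/tmp/scratch",
                                                  dry_run=True)
print(would_remove)
# e.g. ['/tmp/scratch/old/empty_sub']

reusables.remove_empty_directories("/tmp/scratch")  # actually delete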
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_empty_files(root_directory, dry_run=False, ignore_errors=True,
                       enable_scandir=False):
    """
    Remove all empty files from a path. Returns list of the empty files
    removed.

    :param root_directory: base directory to start at
    :param dry_run: just return a list of what would be removed
    :param ignore_errors: permissions are a pain; ignore errors if blocked
    :param enable_scandir: on python < 3.5 enable external scandir package
    :return: list of removed files
    """
file_list = [] for root, directories, files in _walk(root_directory, enable_scandir=enable_scandir): for file_name in files: file_path = join_paths(root, file_name, strict=True) if os.path.isfile(file_path) and not os.path.getsize(file_path): if file_hash(file_path) == variables.hashes.empty_file.md5: file_list.append(file_path) file_list = sorted(set(file_list)) if not dry_run: for afile in file_list: try: os.unlink(afile) except OSError as err: if ignore_errors: logger.info("File {0} could not be deleted".format(afile)) else: raise err return file_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dup_finder(file_path, directory=".", enable_scandir=False):
    """
    Check a directory for duplicates of the specified file. This is meant
    for a single file only; for checking a directory for dups, use
    directory_duplicates.

    This is designed to be as fast as possible by doing lighter checks
    before progressing to more extensive ones, in order they are:

    1. File size
    2. First twenty bytes
    3. Full SHA256 compare

    .. code:: python

        list(reusables.dup_finder(
             "test_structure\\files_2\\empty_file"))
        # ['C:\\Reusables\\test\\data\\fake_dir',
        #  'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1',
        #  'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2',
        #  'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file']

    :param file_path: Path to file to check for duplicates of
    :param directory: Directory to dig recursively into to look for duplicates
    :param enable_scandir: on python < 3.5 enable external scandir package
    :return: generator of duplicate file paths
    """
size = os.path.getsize(file_path) if size == 0: for empty_file in remove_empty_files(directory, dry_run=True): yield empty_file else: with open(file_path, 'rb') as f: first_twenty = f.read(20) file_sha256 = file_hash(file_path, "sha256") for root, directories, files in _walk(directory, enable_scandir=enable_scandir): for each_file in files: test_file = os.path.join(root, each_file) if os.path.getsize(test_file) == size: try: with open(test_file, 'rb') as f: test_first_twenty = f.read(20) except OSError: logger.warning("Could not open file to compare - " "{0}".format(test_file)) else: if first_twenty == test_first_twenty: if file_hash(test_file, "sha256") == file_sha256: yield os.path.abspath(test_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def directory_duplicates(directory, hash_type='md5', **kwargs):
    """
    Find all duplicates in a directory. Will return a list, in that list
    are lists of duplicate files.

    .. code:: python

        dups = reusables.directory_duplicates('C:\\Users\\Me\\Pictures')

        print(len(dups))
        # 56
        print(dups)
        # [['C:\\Users\\Me\\Pictures\\IMG_20161127.jpg',

    :param directory: Directory to search
    :param hash_type: Type of hash to perform
    :param kwargs: Arguments to pass to find_files to narrow file types
    :return: list of lists of dups
    """
size_map, hash_map = defaultdict(list), defaultdict(list) for item in find_files(directory, **kwargs): file_size = os.path.getsize(item) size_map[file_size].append(item) for possible_dups in (v for v in size_map.values() if len(v) > 1): for each_item in possible_dups: item_hash = file_hash(each_item, hash_type=hash_type) hash_map[item_hash].append(each_item) return [v for v in hash_map.values() if len(v) > 1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join_paths(*paths, **kwargs):
    """
    Join multiple paths together and return the absolute path of them. If
    'safe' is specified, this function will 'clean' the path with the
    'safe_path' function. This will clean root declarations from the path
    after the first item.

    Would like to do 'safe=False' instead of '**kwargs' but stupider
    versions of python *cough 2.6* don't like that after '*paths'.

    .. code:: python

        reusables.join_paths("var", "\\log", "/test")
        'C:\\Users\\Me\\var\\log\\test'

        os.path.join("var", "\\log", "/test")
        '/test'

    :param paths: paths to join together
    :param kwargs: 'safe', make them into a safe path if True
    :return: abspath as string
    """
    path = os.path.abspath(paths[0])

    for next_path in paths[1:]:
        path = os.path.join(path, next_path.lstrip("\\").lstrip("/").strip())
    # rstrip returns a new string, so the result must be assigned
    path = path.rstrip(os.sep)
    return path if not kwargs.get('safe') else safe_path(path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join_here(*paths, **kwargs):
    """
    Join any path or paths as a sub directory of the current file's directory.

    .. code:: python

        reusables.join_here("Makefile")
        # 'C:\\Reusables\\Makefile'

    :param paths: paths to join together
    :param kwargs: 'strict', do not strip os.sep
    :param kwargs: 'safe', make them into a safe path if True
    :return: abspath as string
    """
path = os.path.abspath(".") for next_path in paths: next_path = next_path.lstrip("\\").lstrip("/").strip() if not \ kwargs.get('strict') else next_path path = os.path.abspath(os.path.join(path, next_path)) return path if not kwargs.get('safe') else safe_path(path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def safe_path(path, replacement="_"):
    """
    Replace unsafe path characters with underscores. Do NOT use this
    with existing paths that cannot be modified, this is to help generate
    new, clean paths.

    Supports windows and *nix systems.

    :param path: path as a string
    :param replacement: character to use in place of bad characters
    :return: a safer path
    """
if not isinstance(path, str): raise TypeError("path must be a string") if os.sep not in path: return safe_filename(path, replacement=replacement) filename = safe_filename(os.path.basename(path)) dirname = os.path.dirname(path) safe_dirname = "" regexp = regex.path.windows.safe if win_based else regex.path.linux.safe if win_based and dirname.find(":\\") == 1 and dirname[0].isalpha(): safe_dirname = dirname[0:3] dirname = dirname[3:] if regexp.search(dirname) and check_filename(filename): return path else: for char in dirname: safe_dirname += char if regexp.search(char) else replacement sanitized_path = os.path.normpath("{path}{sep}{filename}".format( path=safe_dirname, sep=os.sep if not safe_dirname.endswith(os.sep) else "", filename=filename)) if (not filename and path.endswith(os.sep) and not sanitized_path.endswith(os.sep)): sanitized_path += os.sep return sanitized_path
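Illustrative input and output (the exact characters replaced depend on the platform's unsafe-character regex, so treat this result as an assumption for a *nix system):

reusables.safe_path("/var/log/bad*name?.txt")
# '/var/log/bad_name_.txt'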
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def change_task_size(self, size): """Blocking request to change number of running tasks"""
    self._pause.value = True
    self.log.debug("About to change task size to {0}".format(size))
    try:
        size = int(size)
    except ValueError:
        self.log.error("Cannot change task size, non integer size provided")
        return False
    if size < 0:
        self.log.error("Cannot change task size, less than 0 size provided")
        return False

    if size < self.max_tasks:
        diff = self.max_tasks - size
        self.log.debug("Reducing size offset by {0}".format(diff))
        while True:
            self._update_tasks()
            if len(self.free_tasks) >= diff:
                for i in range(diff):
                    task_id = self.free_tasks.pop(0)
                    del self.current_tasks[task_id]
                break
            time.sleep(0.5)
        if not size:
            self.max_tasks = size
            self._reset_and_pause()
            return True
    elif size > self.max_tasks:
        diff = size - self.max_tasks
        for i in range(diff):
            task_id = str(uuid.uuid4())
            self.current_tasks[task_id] = {}
            self.free_tasks.append(task_id)
    # Record the new size only after comparing against the old one,
    # otherwise neither resize branch can ever run
    self.max_tasks = size
    self._pause.value = False
    self.log.debug("Task size changed to {0}".format(size))
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop(self): """Hard stop the server and sub process"""
self._end.value = True if self.background_process: try: self.background_process.terminate() except Exception: pass for task_id, values in self.current_tasks.items(): try: values['proc'].terminate() except Exception: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_state(self): """Get general information about the state of the class"""
return {"started": (True if self.background_process and self.background_process.is_alive() else False), "paused": self._pause.value, "stopped": self._end.value, "tasks": len(self.current_tasks), "busy_tasks": len(self.busy_tasks), "free_tasks": len(self.free_tasks)}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main_loop(self, stop_at_empty=False):
    """Blocking function that can be run directly; if so, you will probably
    want to set 'stop_at_empty' to True, or have a separate process adding
    items to the queue.
    """
try: while True: self.hook_pre_command() self._check_command_queue() if self.run_until and self.run_until < datetime.datetime.now(): self.log.info("Time limit reached") break if self._end.value: break if self._pause.value: time.sleep(.5) continue self.hook_post_command() self._update_tasks() task_id = self._free_task() if task_id: try: task = self.task_queue.get(timeout=.1) except queue.Empty: if stop_at_empty: break self._return_task(task_id) else: self.hook_pre_task() self.log.debug("Starting task on {0}".format(task_id)) try: self._start_task(task_id, task) except Exception as err: self.log.exception("Could not start task {0} -" " {1}".format(task_id, err)) else: self.hook_post_task() finally: self.log.info("Ending main loop")
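A rough sketch of driving this loop (the subclass name and the perform_task override point are assumptions inferred from the methods referenced above, such as _start_task and task_queue; the constructor signature is also assumed):

class EchoTasker(Tasker):  # hypothetical subclass
    def perform_task(self, task, task_queue):  # assumed override point
        print("handling", task)

tasker = EchoTasker(max_tasks=2)  # constructor signature assumed
for job in range(10):
    tasker.task_queue.put(job)
tasker.main_loop(stop_at_empty=True)  # blocks until the queue drains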